diff --git a/bindgen-tests/tests/expectations/tests/bitfield-32bit-overflow.rs b/bindgen-tests/tests/expectations/tests/bitfield-32bit-overflow.rs index 783f0ef7a9..6ec853994a 100644 --- a/bindgen-tests/tests/expectations/tests/bitfield-32bit-overflow.rs +++ b/bindgen-tests/tests/expectations/tests/bitfield-32bit-overflow.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,17 +108,32 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::<Storage>(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); +
storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::<Storage>(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl<const N: usize> __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const fn get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize.
+ #[inline] + pub fn set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize.
+ #[inline] + pub const unsafe fn raw_get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize.
+ #[inline] + pub unsafe fn raw_set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::<u8>(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift } else { - i + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } @@ -158,13 +501,15 @@ const _: () = { impl MuchBitfield { #[inline] pub fn m0(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u8) + } } #[inline] pub fn set_m0(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -173,7 +518,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) as u8, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -183,23 +531,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m1(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 1u8>() as u8) + } } #[inline] pub fn set_m1(&mut self, 
val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(1usize, 1u8, val as u64) + self._bitfield_1.set_const::<1usize, 1u8>(val as u64) } } #[inline] @@ -208,7 +556,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 1u8) as u8, + >>::raw_get_const::< + 1usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -218,23 +569,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m2(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<2usize, 1u8>() as u8) + } } #[inline] pub fn set_m2(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(2usize, 1u8, val as u64) + self._bitfield_1.set_const::<2usize, 1u8>(val as u64) } } #[inline] @@ -243,7 +594,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 2usize, 1u8) as u8, + >>::raw_get_const::< + 2usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -253,23 +607,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 2usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m3(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(3usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<3usize, 1u8>() as u8) + } } #[inline] pub fn set_m3(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(3usize, 1u8, val as u64) + self._bitfield_1.set_const::<3usize, 1u8>(val as u64) } } #[inline] @@ -278,7 +632,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 3usize, 1u8) as u8, + >>::raw_get_const::< + 3usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -288,23 +645,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 3usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m4(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(4usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<4usize, 1u8>() as u8) + } } #[inline] pub fn set_m4(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(4usize, 1u8, val as u64) + self._bitfield_1.set_const::<4usize, 1u8>(val as u64) } } #[inline] @@ -313,7 +670,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 4usize, 1u8) as u8, + >>::raw_get_const::< + 4usize, + 1u8, + 
>(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -323,23 +683,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 4usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m5(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(5usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<5usize, 1u8>() as u8) + } } #[inline] pub fn set_m5(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(5usize, 1u8, val as u64) + self._bitfield_1.set_const::<5usize, 1u8>(val as u64) } } #[inline] @@ -348,7 +708,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 5usize, 1u8) as u8, + >>::raw_get_const::< + 5usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -358,23 +721,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 5usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m6(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(6usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<6usize, 1u8>() as u8) + } } #[inline] pub fn set_m6(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(6usize, 1u8, val as u64) + self._bitfield_1.set_const::<6usize, 1u8>(val as u64) } } #[inline] @@ -383,7 +746,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 6usize, 1u8) as u8, + >>::raw_get_const::< + 6usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -393,23 +759,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 6usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m7(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(7usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<7usize, 1u8>() as u8) + } } #[inline] pub fn set_m7(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(7usize, 1u8, val as u64) + self._bitfield_1.set_const::<7usize, 1u8>(val as u64) } } #[inline] @@ -418,7 +784,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 7usize, 1u8) as u8, + >>::raw_get_const::< + 7usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -428,23 +797,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 7usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m8(&self) -> ::std::os::raw::c_char { - 
unsafe { ::std::mem::transmute(self._bitfield_1.get(8usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<8usize, 1u8>() as u8) + } } #[inline] pub fn set_m8(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(8usize, 1u8, val as u64) + self._bitfield_1.set_const::<8usize, 1u8>(val as u64) } } #[inline] @@ -453,7 +822,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 8usize, 1u8) as u8, + >>::raw_get_const::< + 8usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -463,23 +835,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 8usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m9(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(9usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<9usize, 1u8>() as u8) + } } #[inline] pub fn set_m9(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(9usize, 1u8, val as u64) + self._bitfield_1.set_const::<9usize, 1u8>(val as u64) } } #[inline] @@ -488,7 +860,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 9usize, 1u8) as u8, + >>::raw_get_const::< + 9usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -498,23 +873,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 9usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m10(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(10usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<10usize, 1u8>() as u8) + } } #[inline] pub fn set_m10(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(10usize, 1u8, val as u64) + self._bitfield_1.set_const::<10usize, 1u8>(val as u64) } } #[inline] @@ -523,8 +898,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 10usize, 1u8) - as u8, + >>::raw_get_const::< + 10usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -534,23 +911,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 10usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m11(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(11usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<11usize, 1u8>() as u8) + } } #[inline] pub fn set_m11(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(11usize, 1u8, val as u64) + self._bitfield_1.set_const::<11usize, 1u8>(val as u64) } } #[inline] @@ 
-559,8 +936,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 11usize, 1u8) - as u8, + >>::raw_get_const::< + 11usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -570,23 +949,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 11usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m12(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(12usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<12usize, 1u8>() as u8) + } } #[inline] pub fn set_m12(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(12usize, 1u8, val as u64) + self._bitfield_1.set_const::<12usize, 1u8>(val as u64) } } #[inline] @@ -595,8 +974,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 12usize, 1u8) - as u8, + >>::raw_get_const::< + 12usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -606,23 +987,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 12usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m13(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(13usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<13usize, 1u8>() as u8) + } } #[inline] pub fn set_m13(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(13usize, 1u8, val as u64) + self._bitfield_1.set_const::<13usize, 1u8>(val as u64) } } #[inline] @@ -631,8 +1012,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 13usize, 1u8) - as u8, + >>::raw_get_const::< + 13usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -642,23 +1025,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 13usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m14(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(14usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<14usize, 1u8>() as u8) + } } #[inline] pub fn set_m14(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(14usize, 1u8, val as u64) + self._bitfield_1.set_const::<14usize, 1u8>(val as u64) } } #[inline] @@ -667,8 +1050,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 14usize, 1u8) - as u8, + >>::raw_get_const::< + 14usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -678,23 +1063,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); 
<__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 14usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m15(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(15usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<15usize, 1u8>() as u8) + } } #[inline] pub fn set_m15(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(15usize, 1u8, val as u64) + self._bitfield_1.set_const::<15usize, 1u8>(val as u64) } } #[inline] @@ -703,8 +1088,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 15usize, 1u8) - as u8, + >>::raw_get_const::< + 15usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -714,23 +1101,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 15usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m16(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(16usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<16usize, 1u8>() as u8) + } } #[inline] pub fn set_m16(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(16usize, 1u8, val as u64) + self._bitfield_1.set_const::<16usize, 1u8>(val as u64) } } #[inline] @@ -739,8 +1126,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 16usize, 1u8) - as u8, + >>::raw_get_const::< + 16usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -750,23 +1139,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 16usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m17(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(17usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<17usize, 1u8>() as u8) + } } #[inline] pub fn set_m17(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(17usize, 1u8, val as u64) + self._bitfield_1.set_const::<17usize, 1u8>(val as u64) } } #[inline] @@ -775,8 +1164,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 17usize, 1u8) - as u8, + >>::raw_get_const::< + 17usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -786,23 +1177,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 17usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m18(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(18usize, 1u8) as u8) } + unsafe { + 
::std::mem::transmute(self._bitfield_1.get_const::<18usize, 1u8>() as u8) + } } #[inline] pub fn set_m18(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(18usize, 1u8, val as u64) + self._bitfield_1.set_const::<18usize, 1u8>(val as u64) } } #[inline] @@ -811,8 +1202,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 18usize, 1u8) - as u8, + >>::raw_get_const::< + 18usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -822,23 +1215,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 18usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m19(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(19usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<19usize, 1u8>() as u8) + } } #[inline] pub fn set_m19(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(19usize, 1u8, val as u64) + self._bitfield_1.set_const::<19usize, 1u8>(val as u64) } } #[inline] @@ -847,8 +1240,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 19usize, 1u8) - as u8, + >>::raw_get_const::< + 19usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -858,23 +1253,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 19usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m20(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(20usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<20usize, 1u8>() as u8) + } } #[inline] pub fn set_m20(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(20usize, 1u8, val as u64) + self._bitfield_1.set_const::<20usize, 1u8>(val as u64) } } #[inline] @@ -883,8 +1278,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 20usize, 1u8) - as u8, + >>::raw_get_const::< + 20usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -894,23 +1291,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 20usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m21(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(21usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<21usize, 1u8>() as u8) + } } #[inline] pub fn set_m21(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(21usize, 1u8, val as u64) + self._bitfield_1.set_const::<21usize, 1u8>(val as u64) } } #[inline] @@ -919,8 +1316,10 @@ impl MuchBitfield { ::std::mem::transmute( 
<__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 21usize, 1u8) - as u8, + >>::raw_get_const::< + 21usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -930,23 +1329,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 21usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m22(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(22usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<22usize, 1u8>() as u8) + } } #[inline] pub fn set_m22(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(22usize, 1u8, val as u64) + self._bitfield_1.set_const::<22usize, 1u8>(val as u64) } } #[inline] @@ -955,8 +1354,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 22usize, 1u8) - as u8, + >>::raw_get_const::< + 22usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -966,23 +1367,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 22usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m23(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(23usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<23usize, 1u8>() as u8) + } } #[inline] pub fn set_m23(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(23usize, 1u8, val as u64) + self._bitfield_1.set_const::<23usize, 1u8>(val as u64) } } #[inline] @@ -991,8 +1392,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 23usize, 1u8) - as u8, + >>::raw_get_const::< + 23usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -1002,23 +1405,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 23usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m24(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(24usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<24usize, 1u8>() as u8) + } } #[inline] pub fn set_m24(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(24usize, 1u8, val as u64) + self._bitfield_1.set_const::<24usize, 1u8>(val as u64) } } #[inline] @@ -1027,8 +1430,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 24usize, 1u8) - as u8, + >>::raw_get_const::< + 24usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -1038,23 +1443,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - 
::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 24usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m25(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(25usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<25usize, 1u8>() as u8) + } } #[inline] pub fn set_m25(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(25usize, 1u8, val as u64) + self._bitfield_1.set_const::<25usize, 1u8>(val as u64) } } #[inline] @@ -1063,8 +1468,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 25usize, 1u8) - as u8, + >>::raw_get_const::< + 25usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -1074,23 +1481,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 25usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m26(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(26usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<26usize, 1u8>() as u8) + } } #[inline] pub fn set_m26(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(26usize, 1u8, val as u64) + self._bitfield_1.set_const::<26usize, 1u8>(val as u64) } } #[inline] @@ -1099,8 +1506,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 26usize, 1u8) - as u8, + >>::raw_get_const::< + 26usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -1110,23 +1519,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 26usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m27(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(27usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<27usize, 1u8>() as u8) + } } #[inline] pub fn set_m27(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(27usize, 1u8, val as u64) + self._bitfield_1.set_const::<27usize, 1u8>(val as u64) } } #[inline] @@ -1135,8 +1544,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 27usize, 1u8) - as u8, + >>::raw_get_const::< + 27usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -1146,23 +1557,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 27usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m28(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(28usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<28usize, 
1u8>() as u8) + } } #[inline] pub fn set_m28(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(28usize, 1u8, val as u64) + self._bitfield_1.set_const::<28usize, 1u8>(val as u64) } } #[inline] @@ -1171,8 +1582,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 28usize, 1u8) - as u8, + >>::raw_get_const::< + 28usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -1182,23 +1595,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 28usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m29(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(29usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<29usize, 1u8>() as u8) + } } #[inline] pub fn set_m29(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(29usize, 1u8, val as u64) + self._bitfield_1.set_const::<29usize, 1u8>(val as u64) } } #[inline] @@ -1207,8 +1620,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 29usize, 1u8) - as u8, + >>::raw_get_const::< + 29usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -1218,23 +1633,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 29usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m30(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(30usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<30usize, 1u8>() as u8) + } } #[inline] pub fn set_m30(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(30usize, 1u8, val as u64) + self._bitfield_1.set_const::<30usize, 1u8>(val as u64) } } #[inline] @@ -1243,8 +1658,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 30usize, 1u8) - as u8, + >>::raw_get_const::< + 30usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -1254,23 +1671,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 30usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m31(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(31usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<31usize, 1u8>() as u8) + } } #[inline] pub fn set_m31(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(31usize, 1u8, val as u64) + self._bitfield_1.set_const::<31usize, 1u8>(val as u64) } } #[inline] @@ -1279,8 +1696,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - 
>>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 31usize, 1u8) - as u8, + >>::raw_get_const::< + 31usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -1290,23 +1709,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 31usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m32(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(32usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<32usize, 1u8>() as u8) + } } #[inline] pub fn set_m32(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(32usize, 1u8, val as u64) + self._bitfield_1.set_const::<32usize, 1u8>(val as u64) } } #[inline] @@ -1315,8 +1734,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 32usize, 1u8) - as u8, + >>::raw_get_const::< + 32usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -1326,12 +1747,10 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 32usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -1372,302 +1791,269 @@ impl MuchBitfield { ) -> __BindgenBitfieldUnit<[u8; 5usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 5usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let m0: u8 = unsafe { ::std::mem::transmute(m0) }; - m0 as u64 - }, - ); + >({ + let m0: u8 = unsafe { ::std::mem::transmute(m0) }; + m0 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 1u8, - { - let m1: u8 = unsafe { ::std::mem::transmute(m1) }; - m1 as u64 - }, - ); + >({ + let m1: u8 = unsafe { ::std::mem::transmute(m1) }; + m1 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 2usize, 1u8, - { - let m2: u8 = unsafe { ::std::mem::transmute(m2) }; - m2 as u64 - }, - ); + >({ + let m2: u8 = unsafe { ::std::mem::transmute(m2) }; + m2 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 3usize, 1u8, - { - let m3: u8 = unsafe { ::std::mem::transmute(m3) }; - m3 as u64 - }, - ); + >({ + let m3: u8 = unsafe { ::std::mem::transmute(m3) }; + m3 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 4usize, 1u8, - { - let m4: u8 = unsafe { ::std::mem::transmute(m4) }; - m4 as u64 - }, - ); + >({ + let m4: u8 = unsafe { ::std::mem::transmute(m4) }; + m4 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 5usize, 1u8, - { - let m5: u8 = unsafe { ::std::mem::transmute(m5) }; - m5 as u64 - }, - ); + >({ + let m5: u8 = unsafe { ::std::mem::transmute(m5) }; + m5 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 6usize, 1u8, - { - let m6: u8 = unsafe { ::std::mem::transmute(m6) }; - m6 as u64 - }, - ); + >({ + let m6: u8 = unsafe { ::std::mem::transmute(m6) }; + m6 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 7usize, 1u8, - { - let m7: u8 = unsafe { ::std::mem::transmute(m7) }; - m7 as u64 - }, - ); + >({ + let m7: u8 = unsafe { ::std::mem::transmute(m7) }; + m7 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 8usize, 1u8, - { - let m8: u8 = 
unsafe { ::std::mem::transmute(m8) }; - m8 as u64 - }, - ); + >({ + let m8: u8 = unsafe { ::std::mem::transmute(m8) }; + m8 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 9usize, 1u8, - { - let m9: u8 = unsafe { ::std::mem::transmute(m9) }; - m9 as u64 - }, - ); + >({ + let m9: u8 = unsafe { ::std::mem::transmute(m9) }; + m9 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 10usize, 1u8, - { - let m10: u8 = unsafe { ::std::mem::transmute(m10) }; - m10 as u64 - }, - ); + >({ + let m10: u8 = unsafe { ::std::mem::transmute(m10) }; + m10 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 11usize, 1u8, - { - let m11: u8 = unsafe { ::std::mem::transmute(m11) }; - m11 as u64 - }, - ); + >({ + let m11: u8 = unsafe { ::std::mem::transmute(m11) }; + m11 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 12usize, 1u8, - { - let m12: u8 = unsafe { ::std::mem::transmute(m12) }; - m12 as u64 - }, - ); + >({ + let m12: u8 = unsafe { ::std::mem::transmute(m12) }; + m12 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 13usize, 1u8, - { - let m13: u8 = unsafe { ::std::mem::transmute(m13) }; - m13 as u64 - }, - ); + >({ + let m13: u8 = unsafe { ::std::mem::transmute(m13) }; + m13 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 14usize, 1u8, - { - let m14: u8 = unsafe { ::std::mem::transmute(m14) }; - m14 as u64 - }, - ); + >({ + let m14: u8 = unsafe { ::std::mem::transmute(m14) }; + m14 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 15usize, 1u8, - { - let m15: u8 = unsafe { ::std::mem::transmute(m15) }; - m15 as u64 - }, - ); + >({ + let m15: u8 = unsafe { ::std::mem::transmute(m15) }; + m15 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 16usize, 1u8, - { - let m16: u8 = unsafe { ::std::mem::transmute(m16) }; - m16 as u64 - }, - ); + >({ + let m16: u8 = unsafe { ::std::mem::transmute(m16) }; + m16 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 17usize, 1u8, - { - let m17: u8 = unsafe { ::std::mem::transmute(m17) }; - m17 as u64 - }, - ); + >({ + let m17: u8 = unsafe { ::std::mem::transmute(m17) }; + m17 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 18usize, 1u8, - { - let m18: u8 = unsafe { ::std::mem::transmute(m18) }; - m18 as u64 - }, - ); + >({ + let m18: u8 = unsafe { ::std::mem::transmute(m18) }; + m18 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 19usize, 1u8, - { - let m19: u8 = unsafe { ::std::mem::transmute(m19) }; - m19 as u64 - }, - ); + >({ + let m19: u8 = unsafe { ::std::mem::transmute(m19) }; + m19 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 20usize, 1u8, - { - let m20: u8 = unsafe { ::std::mem::transmute(m20) }; - m20 as u64 - }, - ); + >({ + let m20: u8 = unsafe { ::std::mem::transmute(m20) }; + m20 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 21usize, 1u8, - { - let m21: u8 = unsafe { ::std::mem::transmute(m21) }; - m21 as u64 - }, - ); + >({ + let m21: u8 = unsafe { ::std::mem::transmute(m21) }; + m21 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 22usize, 1u8, - { - let m22: u8 = unsafe { ::std::mem::transmute(m22) }; - m22 as u64 - }, - ); + >({ + let m22: u8 = unsafe { ::std::mem::transmute(m22) }; + m22 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 23usize, 1u8, - { - let m23: u8 = unsafe { ::std::mem::transmute(m23) }; - m23 as u64 - }, - ); + >({ + let m23: u8 = unsafe { ::std::mem::transmute(m23) }; + m23 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 24usize, 1u8, - 
{ - let m24: u8 = unsafe { ::std::mem::transmute(m24) }; - m24 as u64 - }, - ); + >({ + let m24: u8 = unsafe { ::std::mem::transmute(m24) }; + m24 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 25usize, 1u8, - { - let m25: u8 = unsafe { ::std::mem::transmute(m25) }; - m25 as u64 - }, - ); + >({ + let m25: u8 = unsafe { ::std::mem::transmute(m25) }; + m25 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 26usize, 1u8, - { - let m26: u8 = unsafe { ::std::mem::transmute(m26) }; - m26 as u64 - }, - ); + >({ + let m26: u8 = unsafe { ::std::mem::transmute(m26) }; + m26 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 27usize, 1u8, - { - let m27: u8 = unsafe { ::std::mem::transmute(m27) }; - m27 as u64 - }, - ); + >({ + let m27: u8 = unsafe { ::std::mem::transmute(m27) }; + m27 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 28usize, 1u8, - { - let m28: u8 = unsafe { ::std::mem::transmute(m28) }; - m28 as u64 - }, - ); + >({ + let m28: u8 = unsafe { ::std::mem::transmute(m28) }; + m28 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 29usize, 1u8, - { - let m29: u8 = unsafe { ::std::mem::transmute(m29) }; - m29 as u64 - }, - ); + >({ + let m29: u8 = unsafe { ::std::mem::transmute(m29) }; + m29 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 30usize, 1u8, - { - let m30: u8 = unsafe { ::std::mem::transmute(m30) }; - m30 as u64 - }, - ); + >({ + let m30: u8 = unsafe { ::std::mem::transmute(m30) }; + m30 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 31usize, 1u8, - { - let m31: u8 = unsafe { ::std::mem::transmute(m31) }; - m31 as u64 - }, - ); + >({ + let m31: u8 = unsafe { ::std::mem::transmute(m31) }; + m31 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 32usize, 1u8, - { - let m32: u8 = unsafe { ::std::mem::transmute(m32) }; - m32 as u64 - }, - ); + >({ + let m32: u8 = unsafe { ::std::mem::transmute(m32) }; + m32 as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/bitfield-large.rs b/bindgen-tests/tests/expectations/tests/bitfield-large.rs index 5d614ab936..db4fd63334 100644 --- a/bindgen-tests/tests/expectations/tests/bitfield-large.rs +++ b/bindgen-tests/tests/expectations/tests/bitfield-large.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - 
i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for 
efficient bitfield access when offset and width
+/// are known at compile time.
+impl<const N: usize> __BindgenBitfieldUnit<[u8; N]> {
+    /// Get a field using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub const fn get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        &self,
+    ) -> u64 {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return 0;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = 0usize;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i].reverse_bits() as usize)
+                        << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i] as usize) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            // Guard the mask: `1usize << BIT_WIDTH` overflows when the field
+            // is exactly `usize::BITS` wide (masking is a no-op then anyway).
+            if (BIT_WIDTH as usize) < usize::BITS as usize {
+                val &= (1usize << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val as u64
+        } else {
+            let mut val = 0u64;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i].reverse_bits() as u64)
+                        << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i] as u64) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val
+        }
+    }
+    /// Set a field using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
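+    ///
+    /// Example (editor's sketch; the offset, width, and use of the unit's
+    /// `new` constructor are illustrative, not taken from a generated
+    /// binding):
+    /// ```ignore
+    /// let mut unit = __BindgenBitfieldUnit::new([0u8; 2]);
+    /// unit.set_const::<3usize, 5u8>(0b1_0110);
+    /// assert_eq!(unit.get_const::<3usize, 5u8>(), 0b1_0110);
+    /// ```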
+    #[inline]
+    pub fn set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        &mut self,
+        val: u64,
+    ) {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = val as usize;
+            // Guard the mask: `1usize << BIT_WIDTH` overflows when the field
+            // is exactly `usize::BITS` wide.
+            if (BIT_WIDTH as usize) < usize::BITS as usize {
+                val &= (1usize << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = if (BIT_WIDTH as usize) < usize::BITS as usize {
+                ((1usize << BIT_WIDTH) - 1) << bit_shift
+            } else {
+                // Full-width field: `bit_shift` is necessarily 0 here.
+                !0usize
+            };
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                if cfg!(target_endian = "big") {
+                    let byte = self.storage[start_byte + i].reverse_bits();
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    self.storage[start_byte + i] = new_byte.reverse_bits();
+                } else {
+                    self.storage[start_byte + i] = (self.storage[start_byte + i]
+                        & !byte_mask) | (byte_val & byte_mask);
+                }
+                i += 1;
+            }
+        } else {
+            let mut val = val;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 {
+                !0u64 << bit_shift
+            } else {
+                ((1u64 << BIT_WIDTH) - 1) << bit_shift
+            };
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                if cfg!(target_endian = "big") {
+                    let byte = self.storage[start_byte + i].reverse_bits();
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    self.storage[start_byte + i] = new_byte.reverse_bits();
+                } else {
+                    self.storage[start_byte + i] = (self.storage[start_byte + i]
+                        & !byte_mask) | (byte_val & byte_mask);
+                }
+                i += 1;
+            }
+        }
+    }
+    /// Raw pointer get using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
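+    ///
+    /// Example (editor's sketch): reading through a raw pointer, as the
+    /// generated raw accessors below do; `unit` is assumed to be any
+    /// initialized unit.
+    /// ```ignore
+    /// let v = unsafe {
+    ///     <__BindgenBitfieldUnit<[u8; 2]>>::raw_get_const::<3usize, 5u8>(&unit)
+    /// };
+    /// ```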
+    #[inline]
+    pub const unsafe fn raw_get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        this: *const Self,
+    ) -> u64 {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return 0;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 };
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = 0usize;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte.reverse_bits() as usize) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte as usize) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            // Guard the mask, as in `get_const`, against a full-width shift.
+            if (BIT_WIDTH as usize) < usize::BITS as usize {
+                val &= (1usize << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val as u64
+        } else {
+            let mut val = 0u64;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte.reverse_bits() as u64) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte as u64) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val
+        }
+    }
+    /// Raw pointer set using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
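+    ///
+    /// # Safety
+    ///
+    /// (Editor's note, inferred from the body.) `this` must point to a
+    /// valid, writable `__BindgenBitfieldUnit<[u8; N]>`. Only the bytes the
+    /// field overlaps are read-modified-written; neighbouring bits are
+    /// preserved by `field_mask`.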
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } @@ -159,13 +502,15 @@ const _: () = { impl HasBigBitfield { #[inline] pub fn x(&self) -> i128 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 128u8) as u128) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 128u8>() as u128) + } } #[inline] pub fn set_x(&mut self, val: i128) { unsafe { let val: u128 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 128u8, val as u64) + self._bitfield_1.set_const::<0usize, 128u8>(val as u64) } } #[inline] @@ -174,8 +519,10 @@ impl HasBigBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 128u8) - as u128, + >>::raw_get_const::< + 0usize, + 128u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u128, ) } } @@ -185,26 +532,23 @@ impl HasBigBitfield { let val: u128 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 128u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn new_bitfield_1(x: i128) -> __BindgenBitfieldUnit<[u8; 16usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 16usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 128u8, - { - let x: u128 = unsafe { ::std::mem::transmute(x) }; - x as u64 - }, - ); + >({ + let x: 
u128 = unsafe { ::std::mem::transmute(x) }; + x as u64 + }); __bindgen_bitfield_unit } } @@ -226,13 +570,15 @@ const _: () = { impl HasTwoBigBitfields { #[inline] pub fn x(&self) -> i128 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 80u8) as u128) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 80u8>() as u128) + } } #[inline] pub fn set_x(&mut self, val: i128) { unsafe { let val: u128 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 80u8, val as u64) + self._bitfield_1.set_const::<0usize, 80u8>(val as u64) } } #[inline] @@ -241,8 +587,10 @@ impl HasTwoBigBitfields { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 80u8) - as u128, + >>::raw_get_const::< + 0usize, + 80u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u128, ) } } @@ -252,23 +600,23 @@ impl HasTwoBigBitfields { let val: u128 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 80u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn y(&self) -> i128 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(80usize, 48u8) as u128) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<80usize, 48u8>() as u128) + } } #[inline] pub fn set_y(&mut self, val: i128) { unsafe { let val: u128 = ::std::mem::transmute(val); - self._bitfield_1.set(80usize, 48u8, val as u64) + self._bitfield_1.set_const::<80usize, 48u8>(val as u64) } } #[inline] @@ -277,8 +625,10 @@ impl HasTwoBigBitfields { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 80usize, 48u8) - as u128, + >>::raw_get_const::< + 80usize, + 48u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u128, ) } } @@ -288,35 +638,31 @@ impl HasTwoBigBitfields { let val: u128 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 80usize, 48u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn new_bitfield_1(x: i128, y: i128) -> __BindgenBitfieldUnit<[u8; 16usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 16usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 80u8, - { - let x: u128 = unsafe { ::std::mem::transmute(x) }; - x as u64 - }, - ); + >({ + let x: u128 = unsafe { ::std::mem::transmute(x) }; + x as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 80usize, 48u8, - { - let y: u128 = unsafe { ::std::mem::transmute(y) }; - y as u64 - }, - ); + >({ + let y: u128 = unsafe { ::std::mem::transmute(y) }; + y as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/bitfield-linux-32.rs b/bindgen-tests/tests/expectations/tests/bitfield-linux-32.rs index 3e676c53b5..8bac523f99 100644 --- a/bindgen-tests/tests/expectations/tests/bitfield-linux-32.rs +++ b/bindgen-tests/tests/expectations/tests/bitfield-linux-32.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + 
let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 
- bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
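+ /// Values wider than `BIT_WIDTH` are truncated (editor's note, matching
+ /// the masking below): e.g. `unit.set_const::<0usize, 3u8>(0b1111)` stores
+ /// `0b111`.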
+ #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
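+ /// Unlike `get_const`, this reads through a raw pointer, so the generated
+ /// accessors can use it with `addr_of!` on fields of packed structs where
+ /// taking a `&self` reference would be unsound (editor's note).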
+ #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
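+ /// Example (editor's sketch; `p` is assumed to be a valid `*mut` to a
+ /// struct whose `_bitfield_1` is a `__BindgenBitfieldUnit<[u8; 8]>`):
+ /// ```ignore
+ /// unsafe {
+ ///     <__BindgenBitfieldUnit<[u8; 8]>>::raw_set_const::<0usize, 12u8>(
+ ///         core::ptr::addr_of_mut!((*p)._bitfield_1),
+ ///         0x123,
+ ///     );
+ /// }
+ /// ```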
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } @@ -154,13 +497,15 @@ pub struct Test { impl Test { #[inline] pub fn x(&self) -> u64 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 56u8) as u64) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 56u8>() as u64) + } } #[inline] pub fn set_x(&mut self, val: u64) { unsafe { let val: u64 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 56u8, val as u64) + self._bitfield_1.set_const::<0usize, 56u8>(val as u64) } } #[inline] @@ -169,8 +514,10 @@ impl Test { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 8usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 56u8) - as u64, + >>::raw_get_const::< + 0usize, + 56u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u64, ) } } @@ -180,23 +527,23 @@ impl Test { let val: u64 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 8usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 56u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn y(&self) -> u64 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(56usize, 8u8) as u64) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<56usize, 8u8>() as u64) + } } #[inline] pub fn set_y(&mut self, val: u64) { unsafe { let val: u64 = ::std::mem::transmute(val); - self._bitfield_1.set(56usize, 8u8, val as u64) + 
self._bitfield_1.set_const::<56usize, 8u8>(val as u64) } } #[inline] @@ -205,8 +552,10 @@ impl Test { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 8usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 56usize, 8u8) - as u64, + >>::raw_get_const::< + 56usize, + 8u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u64, ) } } @@ -216,35 +565,31 @@ impl Test { let val: u64 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 8usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 56usize, 8u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn new_bitfield_1(x: u64, y: u64) -> __BindgenBitfieldUnit<[u8; 8usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 56u8, - { - let x: u64 = unsafe { ::std::mem::transmute(x) }; - x as u64 - }, - ); + >({ + let x: u64 = unsafe { ::std::mem::transmute(x) }; + x as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 56usize, 8u8, - { - let y: u64 = unsafe { ::std::mem::transmute(y) }; - y as u64 - }, - ); + >({ + let y: u64 = unsafe { ::std::mem::transmute(y) }; + y as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/bitfield-method-same-name.rs b/bindgen-tests/tests/expectations/tests/bitfield-method-same-name.rs index 09ca005589..70fc98db86 100644 --- a/bindgen-tests/tests/expectations/tests/bitfield-method-same-name.rs +++ b/bindgen-tests/tests/expectations/tests/bitfield-method-same-name.rs @@ -75,16 +75,29 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -95,17 +108,32 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); } } + val >>= bit_shift; + if 
bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
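+ /// Example (editor's sketch, mirroring the 3-bit field in this file):
+ /// ```ignore
+ /// let ty = unit.get_const::<0usize, 3u8>() as u8;
+ /// ```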
+ #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } } else { - i + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | 
(byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
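+ /// The `usize` fast path matches `set_const`; on 32-bit targets a field
+ /// that does not fit in 32 bits takes the `u64` fallback path instead
+ /// (editor's note).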
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } @@ -170,13 +513,15 @@ unsafe extern "C" { impl Foo { #[inline] pub fn type__bindgen_bitfield(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 3u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 3u8>() as u8) + } } #[inline] pub fn set_type__bindgen_bitfield(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 3u8, val as u64) + self._bitfield_1.set_const::<0usize, 3u8>(val as u64) } } #[inline] @@ -187,7 +532,10 @@ impl Foo { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 3u8) as u8, + >>::raw_get_const::< + 0usize, + 3u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -200,12 +548,10 @@ impl Foo { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 3u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -214,16 +560,15 @@ impl Foo { ) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 3u8, - { 
- let type__bindgen_bitfield: u8 = unsafe { - ::std::mem::transmute(type__bindgen_bitfield) - }; - type__bindgen_bitfield as u64 - }, - ); + >({ + let type__bindgen_bitfield: u8 = unsafe { + ::std::mem::transmute(type__bindgen_bitfield) + }; + type__bindgen_bitfield as u64 + }); __bindgen_bitfield_unit } #[inline] diff --git a/bindgen-tests/tests/expectations/tests/bitfield-template.rs b/bindgen-tests/tests/expectations/tests/bitfield-template.rs index eb454e0db4..5678344882 100644 --- a/bindgen-tests/tests/expectations/tests/bitfield-template.rs +++ b/bindgen-tests/tests/expectations/tests/bitfield-template.rs @@ -75,16 +75,29 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -95,17 +108,32 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; 
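+ // editor's note: the `>= 64` branch avoids `1u64 << 64`, which would
+ // overflow; once the shifted field reaches the top of the word, a mask of
+ // all ones from `bit_shift` upward is equivalent.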
+ for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
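+ /// On big-endian targets each byte is bit-reversed on load and the result
+ /// is reversed back after masking, so callers always receive the value in
+ /// native bit order (editor's note, matching the `cfg!(target_endian)`
+ /// branches).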
+ #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | 
(byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
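+ /// A `BIT_WIDTH` of zero is a no-op, mirroring the early return in the
+ /// dynamic `raw_set` above (editor's note).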
+    /// Raw pointer set using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub unsafe fn raw_set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        this: *mut Self,
+        val: u64,
+    ) {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        let storage_ptr = this.cast::<[u8; N]>().cast::<u8>();
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = val as usize;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift;
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                let byte_ptr = unsafe { storage_ptr.add(start_byte + i) };
+                if cfg!(target_endian = "big") {
+                    let byte = unsafe { (*byte_ptr).reverse_bits() };
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    unsafe { *byte_ptr = new_byte.reverse_bits() };
+                } else {
+                    unsafe {
+                        *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask)
+                    };
+                }
+                i += 1;
+            }
+        } else {
+            let mut val = val;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 {
+                !0u64 << bit_shift
             } else {
-                i
+                ((1u64 << BIT_WIDTH) - 1) << bit_shift
             };
-            unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) };
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                let byte_ptr = unsafe { storage_ptr.add(start_byte + i) };
+                if cfg!(target_endian = "big") {
+                    let byte = unsafe { (*byte_ptr).reverse_bits() };
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    unsafe { *byte_ptr = new_byte.reverse_bits() };
+                } else {
+                    unsafe {
+                        *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask)
+                    };
+                }
+                i += 1;
+            }
+        }
+    }
+}
@@ -164,13 +507,15 @@ impl Default for foo {
 impl foo {
     #[inline]
     pub fn b(&self) -> bool {
-        unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 8u8) as u8) }
+        unsafe {
+            ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 8u8>() as u8)
+        }
     }
     #[inline]
     pub fn set_b(&mut self, val: bool) {
         unsafe {
             let val: u8 = ::std::mem::transmute(val);
-            self._bitfield_1.set(0usize, 8u8, val as u64)
+            self._bitfield_1.set_const::<0usize, 8u8>(val as u64)
         }
     }
     #[inline]
@@ -179,7 +524,10 @@ impl foo {
             ::std::mem::transmute(
                 <__BindgenBitfieldUnit<
                     [u8; 1usize],
-                >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 8u8) as u8,
+                >>::raw_get_const::<
+                    0usize,
+                    8u8,
+                >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8,
             )
         }
     }
@@ -189,26 +537,23 @@ impl foo {
             let val: u8 = ::std::mem::transmute(val);
             <__BindgenBitfieldUnit<
                 [u8; 1usize],
-            >>::raw_set(
-                ::std::ptr::addr_of_mut!((*this)._bitfield_1),
+            >>::raw_set_const::<
                 0usize,
                 8u8,
-                val as u64,
-            )
+            >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64)
         }
     }
     #[inline]
     pub fn new_bitfield_1(b: bool) -> __BindgenBitfieldUnit<[u8; 1usize]> {
         let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default();
         __bindgen_bitfield_unit
-            .set(
+            .set_const::<
                 0usize,
                 8u8,
-                {
-                    let b: u8 = unsafe { ::std::mem::transmute(b) };
-                    b as u64
-                },
-            );
+            >({
+ let b: u8 = unsafe { ::std::mem::transmute(b) }; + b as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/bitfield_align.rs b/bindgen-tests/tests/expectations/tests/bitfield_align.rs index 0c70917fc5..c7f50fce6d 100644 --- a/bindgen-tests/tests/expectations/tests/bitfield_align.rs +++ b/bindgen-tests/tests/expectations/tests/bitfield_align.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,17 +108,32 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte 
= (byte & !byte_mask) | (byte_val & byte_mask);
+                storage[start_byte + i] = new_byte.reverse_bits();
             } else {
-                i
-            };
-            self.set_bit(index + bit_offset, val_bit_is_set);
+                storage[start_byte + i] = (storage[start_byte + i] & !byte_mask)
+                    | (byte_val & byte_mask);
+            }
         }
     }
     #[inline]
@@ -133,15 +183,308 @@ where
         debug_assert!(
            (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::<Self>(),
        );
-        for i in 0..(bit_width as usize) {
-            let mask = 1 << i;
-            let val_bit_is_set = val & mask == mask;
-            let index = if cfg!(target_endian = "big") {
-                bit_width as usize - 1 - i
+        if bit_width == 0 {
+            return;
+        }
+        let mut val = val;
+        if bit_width < 64 {
+            val &= (1u64 << bit_width) - 1;
+        }
+        if cfg!(target_endian = "big") {
+            val = val.reverse_bits() >> (64 - bit_width as usize);
+        }
+        let start_byte = bit_offset / 8;
+        let bit_shift = bit_offset % 8;
+        let bytes_needed = (bit_width as usize + bit_shift + 7) / 8;
+        val <<= bit_shift;
+        let field_mask = if bit_width as usize + bit_shift >= 64 {
+            !0u64 << bit_shift
+        } else {
+            ((1u64 << bit_width) - 1) << bit_shift
+        };
+        let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 };
+        for i in 0..bytes_needed {
+            let byte_val = (val >> (i * 8)) as u8;
+            let byte_mask = (field_mask >> (i * 8)) as u8;
+            let byte_ptr = unsafe { storage_ptr.add(start_byte + i) };
+            if cfg!(target_endian = "big") {
+                let byte = unsafe { (*byte_ptr).reverse_bits() };
+                let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                unsafe { *byte_ptr = new_byte.reverse_bits() };
+            } else {
+                unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) };
+            }
+        }
+    }
+}
+/// Const-generic methods for efficient bitfield access when offset and width
+/// are known at compile time.
+impl<const N: usize> __BindgenBitfieldUnit<[u8; N]> {
+    /// Get a field using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub const fn get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        &self,
+    ) -> u64 {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return 0;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = 0usize;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val
+                        |= (self.storage[start_byte + i].reverse_bits() as usize)
+                            << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i] as usize) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val as u64
+        } else {
+            let mut val = 0u64;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val
+                        |= (self.storage[start_byte + i].reverse_bits() as u64)
+                            << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i] as u64) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val
+        }
+    }
+    /// Set a field using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub fn set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        &mut self,
+        val: u64,
+    ) {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = val as usize;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift;
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                if cfg!(target_endian = "big") {
+                    let byte = self.storage[start_byte + i].reverse_bits();
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    self.storage[start_byte + i] = new_byte.reverse_bits();
+                } else {
+                    self.storage[start_byte + i] = (self.storage[start_byte + i]
+                        & !byte_mask) | (byte_val & byte_mask);
+                }
+                i += 1;
+            }
+        } else {
+            let mut val = val;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 {
+                !0u64 << bit_shift
             } else {
-                i
+                ((1u64 << BIT_WIDTH) - 1) << bit_shift
             };
-            unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) };
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                if cfg!(target_endian = "big") {
+                    let byte = self.storage[start_byte + i].reverse_bits();
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    self.storage[start_byte + i] = new_byte.reverse_bits();
+                } else {
+                    self.storage[start_byte + i] = (self.storage[start_byte + i]
+                        & !byte_mask) | (byte_val & byte_mask);
+                }
+                i += 1;
+            }
+        }
+    }
+    /// Raw pointer get using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub const unsafe fn raw_get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        this: *const Self,
+    ) -> u64 {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return 0;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 };
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = 0usize;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte.reverse_bits() as usize) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte as usize) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val as u64
+        } else {
+            let mut val = 0u64;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte.reverse_bits() as u64) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte as u64) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val
+        }
+    }
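The big-endian branches above rest on one identity: bit k of a byte counted from the most significant end equals bit k of the bit-reversed byte counted from the least significant end. Reversing each byte therefore lets the little-endian shift-and-mask arithmetic run unchanged, and the final reverse_bits() plus shift puts the extracted bits back into field order. A self-contained check of that identity, illustrative only:

    fn main() {
        let b: u8 = 0b1011_0001;
        for k in 0..8 {
            // MSB-first bit k of `b` equals LSB-first bit k of `b.reverse_bits()`.
            assert_eq!((b >> (7 - k)) & 1, (b.reverse_bits() >> k) & 1);
        }
    }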
+    /// Raw pointer set using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub unsafe fn raw_set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        this: *mut Self,
+        val: u64,
+    ) {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        let storage_ptr = this.cast::<[u8; N]>().cast::<u8>();
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = val as usize;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift;
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                let byte_ptr = unsafe { storage_ptr.add(start_byte + i) };
+                if cfg!(target_endian = "big") {
+                    let byte = unsafe { (*byte_ptr).reverse_bits() };
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    unsafe { *byte_ptr = new_byte.reverse_bits() };
+                } else {
+                    unsafe {
+                        *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask)
+                    };
+                }
+                i += 1;
+            }
+        } else {
+            let mut val = val;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 {
+                !0u64 << bit_shift
+            } else {
+                ((1u64 << BIT_WIDTH) - 1) << bit_shift
+            };
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                let byte_ptr = unsafe { storage_ptr.add(start_byte + i) };
+                if cfg!(target_endian = "big") {
+                    let byte = unsafe { (*byte_ptr).reverse_bits() };
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    unsafe { *byte_ptr = new_byte.reverse_bits() };
+                } else {
+                    unsafe {
+                        *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask)
+                    };
+                }
+                i += 1;
+            }
+        }
+    }
+}
@@ -163,13 +506,15 @@ const _: () = {
 impl A {
     #[inline]
     pub fn b1(&self) -> ::std::os::raw::c_uint {
-        unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u32) }
+        unsafe {
+            ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u32)
+        }
    }
    #[inline]
    pub fn set_b1(&mut self, val: ::std::os::raw::c_uint) {
        unsafe {
            let val: u32 = ::std::mem::transmute(val);
-            self._bitfield_1.set(0usize, 1u8, val as u64)
+            self._bitfield_1.set_const::<0usize, 1u8>(val as u64)
        }
    }
    #[inline]
@@ -178,8 +523,10 @@ impl A {
            ::std::mem::transmute(
                <__BindgenBitfieldUnit<
                    [u8; 2usize],
-                >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8)
-                    as u32,
+                >>::raw_get_const::<
+                    0usize,
+                    1u8,
+                >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32,
            )
        }
    }
@@ -189,23 +536,23 @@ impl A {
            let val: u32 = ::std::mem::transmute(val);
            <__BindgenBitfieldUnit<
                [u8; 2usize],
-            >>::raw_set(
-                ::std::ptr::addr_of_mut!((*this)._bitfield_1),
+            >>::raw_set_const::<
                0usize,
                1u8,
-                val as u64,
-            )
+            >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64)
        }
    }
    #[inline]
    pub fn b2(&self) -> ::std::os::raw::c_uint {
-        unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u32) }
+        unsafe {
+            ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 1u8>() as u32)
+        }
    }
    #[inline]
    pub fn set_b2(&mut self, val: ::std::os::raw::c_uint) {
        unsafe {
            let val: u32 = ::std::mem::transmute(val);
-
self._bitfield_1.set(1usize, 1u8, val as u64) + self._bitfield_1.set_const::<1usize, 1u8>(val as u64) } } #[inline] @@ -214,8 +561,10 @@ impl A { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 1u8) - as u32, + >>::raw_get_const::< + 1usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -225,23 +574,23 @@ impl A { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b3(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<2usize, 1u8>() as u32) + } } #[inline] pub fn set_b3(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(2usize, 1u8, val as u64) + self._bitfield_1.set_const::<2usize, 1u8>(val as u64) } } #[inline] @@ -250,8 +599,10 @@ impl A { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 2usize, 1u8) - as u32, + >>::raw_get_const::< + 2usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -261,23 +612,23 @@ impl A { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 2usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b4(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(3usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<3usize, 1u8>() as u32) + } } #[inline] pub fn set_b4(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(3usize, 1u8, val as u64) + self._bitfield_1.set_const::<3usize, 1u8>(val as u64) } } #[inline] @@ -286,8 +637,10 @@ impl A { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 3usize, 1u8) - as u32, + >>::raw_get_const::< + 3usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -297,23 +650,23 @@ impl A { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 3usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b5(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(4usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<4usize, 1u8>() as u32) + } } #[inline] pub fn set_b5(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(4usize, 1u8, val as u64) + self._bitfield_1.set_const::<4usize, 1u8>(val as u64) } } #[inline] @@ -322,8 +675,10 @@ impl A { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 4usize, 1u8) - as u32, + >>::raw_get_const::< + 4usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -333,23 +688,23 @@ impl A { let val: u32 = ::std::mem::transmute(val); 
<__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 4usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b6(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(5usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<5usize, 1u8>() as u32) + } } #[inline] pub fn set_b6(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(5usize, 1u8, val as u64) + self._bitfield_1.set_const::<5usize, 1u8>(val as u64) } } #[inline] @@ -358,8 +713,10 @@ impl A { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 5usize, 1u8) - as u32, + >>::raw_get_const::< + 5usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -369,23 +726,23 @@ impl A { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 5usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b7(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(6usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<6usize, 1u8>() as u32) + } } #[inline] pub fn set_b7(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(6usize, 1u8, val as u64) + self._bitfield_1.set_const::<6usize, 1u8>(val as u64) } } #[inline] @@ -394,8 +751,10 @@ impl A { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 6usize, 1u8) - as u32, + >>::raw_get_const::< + 6usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -405,23 +764,23 @@ impl A { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 6usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b8(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(7usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<7usize, 1u8>() as u32) + } } #[inline] pub fn set_b8(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(7usize, 1u8, val as u64) + self._bitfield_1.set_const::<7usize, 1u8>(val as u64) } } #[inline] @@ -430,8 +789,10 @@ impl A { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 7usize, 1u8) - as u32, + >>::raw_get_const::< + 7usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -441,23 +802,23 @@ impl A { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 7usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b9(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(8usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<8usize, 1u8>() as u32) + } } #[inline] pub fn 
set_b9(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(8usize, 1u8, val as u64) + self._bitfield_1.set_const::<8usize, 1u8>(val as u64) } } #[inline] @@ -466,8 +827,10 @@ impl A { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 8usize, 1u8) - as u32, + >>::raw_get_const::< + 8usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -477,23 +840,23 @@ impl A { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 8usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b10(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(9usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<9usize, 1u8>() as u32) + } } #[inline] pub fn set_b10(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(9usize, 1u8, val as u64) + self._bitfield_1.set_const::<9usize, 1u8>(val as u64) } } #[inline] @@ -502,8 +865,10 @@ impl A { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 9usize, 1u8) - as u32, + >>::raw_get_const::< + 9usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -513,12 +878,10 @@ impl A { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 9usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -536,95 +899,85 @@ impl A { ) -> __BindgenBitfieldUnit<[u8; 2usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 2usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let b1: u32 = unsafe { ::std::mem::transmute(b1) }; - b1 as u64 - }, - ); + >({ + let b1: u32 = unsafe { ::std::mem::transmute(b1) }; + b1 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 1u8, - { - let b2: u32 = unsafe { ::std::mem::transmute(b2) }; - b2 as u64 - }, - ); + >({ + let b2: u32 = unsafe { ::std::mem::transmute(b2) }; + b2 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 2usize, 1u8, - { - let b3: u32 = unsafe { ::std::mem::transmute(b3) }; - b3 as u64 - }, - ); + >({ + let b3: u32 = unsafe { ::std::mem::transmute(b3) }; + b3 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 3usize, 1u8, - { - let b4: u32 = unsafe { ::std::mem::transmute(b4) }; - b4 as u64 - }, - ); + >({ + let b4: u32 = unsafe { ::std::mem::transmute(b4) }; + b4 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 4usize, 1u8, - { - let b5: u32 = unsafe { ::std::mem::transmute(b5) }; - b5 as u64 - }, - ); + >({ + let b5: u32 = unsafe { ::std::mem::transmute(b5) }; + b5 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 5usize, 1u8, - { - let b6: u32 = unsafe { ::std::mem::transmute(b6) }; - b6 as u64 - }, - ); + >({ + let b6: u32 = unsafe { ::std::mem::transmute(b6) }; + b6 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 6usize, 1u8, - { - let b7: u32 = unsafe { ::std::mem::transmute(b7) }; - b7 as u64 - }, - ); + >({ + let b7: u32 = unsafe { ::std::mem::transmute(b7) }; + b7 as u64 + }); __bindgen_bitfield_unit - 
.set( + .set_const::< 7usize, 1u8, - { - let b8: u32 = unsafe { ::std::mem::transmute(b8) }; - b8 as u64 - }, - ); + >({ + let b8: u32 = unsafe { ::std::mem::transmute(b8) }; + b8 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 8usize, 1u8, - { - let b9: u32 = unsafe { ::std::mem::transmute(b9) }; - b9 as u64 - }, - ); + >({ + let b9: u32 = unsafe { ::std::mem::transmute(b9) }; + b9 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 9usize, 1u8, - { - let b10: u32 = unsafe { ::std::mem::transmute(b10) }; - b10 as u64 - }, - ); + >({ + let b10: u32 = unsafe { ::std::mem::transmute(b10) }; + b10 as u64 + }); __bindgen_bitfield_unit } } @@ -642,13 +995,15 @@ const _: () = { impl B { #[inline] pub fn foo(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 31u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 31u8>() as u32) + } } #[inline] pub fn set_foo(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 31u8, val as u64) + self._bitfield_1.set_const::<0usize, 31u8>(val as u64) } } #[inline] @@ -657,8 +1012,10 @@ impl B { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 31u8) - as u32, + >>::raw_get_const::< + 0usize, + 31u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -668,23 +1025,23 @@ impl B { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 31u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bar(&self) -> ::std::os::raw::c_uchar { - unsafe { ::std::mem::transmute(self._bitfield_1.get(31usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<31usize, 1u8>() as u8) + } } #[inline] pub fn set_bar(&mut self, val: ::std::os::raw::c_uchar) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(31usize, 1u8, val as u64) + self._bitfield_1.set_const::<31usize, 1u8>(val as u64) } } #[inline] @@ -693,8 +1050,10 @@ impl B { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 31usize, 1u8) - as u8, + >>::raw_get_const::< + 31usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -704,12 +1063,10 @@ impl B { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 31usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -719,23 +1076,21 @@ impl B { ) -> __BindgenBitfieldUnit<[u8; 4usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 31u8, - { - let foo: u32 = unsafe { ::std::mem::transmute(foo) }; - foo as u64 - }, - ); + >({ + let foo: u32 = unsafe { ::std::mem::transmute(foo) }; + foo as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 31usize, 1u8, - { - let bar: u8 = unsafe { ::std::mem::transmute(bar) }; - bar as u64 - }, - ); + >({ + let bar: u8 = unsafe { ::std::mem::transmute(bar) }; + bar as u64 + }); __bindgen_bitfield_unit } } @@ -756,13 +1111,15 @@ const _: () = { impl C { #[inline] pub fn b1(&self) -> ::std::os::raw::c_uint { - 
unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u32) + } } #[inline] pub fn set_b1(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -771,8 +1128,10 @@ impl C { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) - as u32, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -782,23 +1141,23 @@ impl C { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b2(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 1u8>() as u32) + } } #[inline] pub fn set_b2(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(1usize, 1u8, val as u64) + self._bitfield_1.set_const::<1usize, 1u8>(val as u64) } } #[inline] @@ -807,8 +1166,10 @@ impl C { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 1u8) - as u32, + >>::raw_get_const::< + 1usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -818,12 +1179,10 @@ impl C { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -833,23 +1192,21 @@ impl C { ) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let b1: u32 = unsafe { ::std::mem::transmute(b1) }; - b1 as u64 - }, - ); + >({ + let b1: u32 = unsafe { ::std::mem::transmute(b1) }; + b1 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 1u8, - { - let b2: u32 = unsafe { ::std::mem::transmute(b2) }; - b2 as u64 - }, - ); + >({ + let b2: u32 = unsafe { ::std::mem::transmute(b2) }; + b2 as u64 + }); __bindgen_bitfield_unit } } @@ -868,13 +1225,15 @@ const _: () = { impl Date1 { #[inline] pub fn nWeekDay(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 3u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 3u8>() as u16) + } } #[inline] pub fn set_nWeekDay(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 3u8, val as u64) + self._bitfield_1.set_const::<0usize, 3u8>(val as u64) } } #[inline] @@ -883,8 +1242,10 @@ impl Date1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 3u8) - as u16, + >>::raw_get_const::< + 0usize, + 3u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -894,23 +1255,23 @@ impl Date1 { let val: u16 = ::std::mem::transmute(val); 
<__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 3u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn nMonthDay(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(3usize, 6u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<3usize, 6u8>() as u16) + } } #[inline] pub fn set_nMonthDay(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(3usize, 6u8, val as u64) + self._bitfield_1.set_const::<3usize, 6u8>(val as u64) } } #[inline] @@ -919,8 +1280,10 @@ impl Date1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 3usize, 6u8) - as u16, + >>::raw_get_const::< + 3usize, + 6u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -930,23 +1293,23 @@ impl Date1 { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 3usize, 6u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn nMonth(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(9usize, 5u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<9usize, 5u8>() as u16) + } } #[inline] pub fn set_nMonth(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(9usize, 5u8, val as u64) + self._bitfield_1.set_const::<9usize, 5u8>(val as u64) } } #[inline] @@ -955,8 +1318,10 @@ impl Date1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 9usize, 5u8) - as u16, + >>::raw_get_const::< + 9usize, + 5u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -966,23 +1331,23 @@ impl Date1 { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 9usize, 5u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn nYear(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(16usize, 8u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<16usize, 8u8>() as u16) + } } #[inline] pub fn set_nYear(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(16usize, 8u8, val as u64) + self._bitfield_1.set_const::<16usize, 8u8>(val as u64) } } #[inline] @@ -991,8 +1356,10 @@ impl Date1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 16usize, 8u8) - as u16, + >>::raw_get_const::< + 16usize, + 8u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -1002,12 +1369,10 @@ impl Date1 { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 16usize, 8u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -1019,41 +1384,37 @@ impl Date1 { ) -> __BindgenBitfieldUnit<[u8; 3usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 3usize]> = 
Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 3u8, - { - let nWeekDay: u16 = unsafe { ::std::mem::transmute(nWeekDay) }; - nWeekDay as u64 - }, - ); + >({ + let nWeekDay: u16 = unsafe { ::std::mem::transmute(nWeekDay) }; + nWeekDay as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 3usize, 6u8, - { - let nMonthDay: u16 = unsafe { ::std::mem::transmute(nMonthDay) }; - nMonthDay as u64 - }, - ); + >({ + let nMonthDay: u16 = unsafe { ::std::mem::transmute(nMonthDay) }; + nMonthDay as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 9usize, 5u8, - { - let nMonth: u16 = unsafe { ::std::mem::transmute(nMonth) }; - nMonth as u64 - }, - ); + >({ + let nMonth: u16 = unsafe { ::std::mem::transmute(nMonth) }; + nMonth as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 16usize, 8u8, - { - let nYear: u16 = unsafe { ::std::mem::transmute(nYear) }; - nYear as u64 - }, - ); + >({ + let nYear: u16 = unsafe { ::std::mem::transmute(nYear) }; + nYear as u64 + }); __bindgen_bitfield_unit } } @@ -1071,13 +1432,15 @@ const _: () = { impl Date2 { #[inline] pub fn nWeekDay(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 3u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 3u8>() as u16) + } } #[inline] pub fn set_nWeekDay(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 3u8, val as u64) + self._bitfield_1.set_const::<0usize, 3u8>(val as u64) } } #[inline] @@ -1086,8 +1449,10 @@ impl Date2 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 3u8) - as u16, + >>::raw_get_const::< + 0usize, + 3u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -1097,23 +1462,23 @@ impl Date2 { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 3u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn nMonthDay(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(3usize, 6u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<3usize, 6u8>() as u16) + } } #[inline] pub fn set_nMonthDay(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(3usize, 6u8, val as u64) + self._bitfield_1.set_const::<3usize, 6u8>(val as u64) } } #[inline] @@ -1122,8 +1487,10 @@ impl Date2 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 3usize, 6u8) - as u16, + >>::raw_get_const::< + 3usize, + 6u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -1133,23 +1500,23 @@ impl Date2 { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 3usize, 6u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn nMonth(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(9usize, 5u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<9usize, 5u8>() as u16) + } } #[inline] pub fn set_nMonth(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = 
::std::mem::transmute(val); - self._bitfield_1.set(9usize, 5u8, val as u64) + self._bitfield_1.set_const::<9usize, 5u8>(val as u64) } } #[inline] @@ -1158,8 +1525,10 @@ impl Date2 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 9usize, 5u8) - as u16, + >>::raw_get_const::< + 9usize, + 5u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -1169,23 +1538,23 @@ impl Date2 { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 9usize, 5u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn nYear(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(16usize, 8u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<16usize, 8u8>() as u16) + } } #[inline] pub fn set_nYear(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(16usize, 8u8, val as u64) + self._bitfield_1.set_const::<16usize, 8u8>(val as u64) } } #[inline] @@ -1194,8 +1563,10 @@ impl Date2 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 16usize, 8u8) - as u16, + >>::raw_get_const::< + 16usize, + 8u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -1205,23 +1576,23 @@ impl Date2 { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 16usize, 8u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn byte(&self) -> ::std::os::raw::c_uchar { - unsafe { ::std::mem::transmute(self._bitfield_1.get(24usize, 8u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<24usize, 8u8>() as u8) + } } #[inline] pub fn set_byte(&mut self, val: ::std::os::raw::c_uchar) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(24usize, 8u8, val as u64) + self._bitfield_1.set_const::<24usize, 8u8>(val as u64) } } #[inline] @@ -1230,8 +1601,10 @@ impl Date2 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 24usize, 8u8) - as u8, + >>::raw_get_const::< + 24usize, + 8u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -1241,12 +1614,10 @@ impl Date2 { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 24usize, 8u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -1259,50 +1630,45 @@ impl Date2 { ) -> __BindgenBitfieldUnit<[u8; 4usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 3u8, - { - let nWeekDay: u16 = unsafe { ::std::mem::transmute(nWeekDay) }; - nWeekDay as u64 - }, - ); + >({ + let nWeekDay: u16 = unsafe { ::std::mem::transmute(nWeekDay) }; + nWeekDay as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 3usize, 6u8, - { - let nMonthDay: u16 = unsafe { ::std::mem::transmute(nMonthDay) }; - nMonthDay as u64 - }, - ); + >({ + let nMonthDay: u16 = unsafe { ::std::mem::transmute(nMonthDay) }; + nMonthDay as u64 + }); 
__bindgen_bitfield_unit - .set( + .set_const::< 9usize, 5u8, - { - let nMonth: u16 = unsafe { ::std::mem::transmute(nMonth) }; - nMonth as u64 - }, - ); + >({ + let nMonth: u16 = unsafe { ::std::mem::transmute(nMonth) }; + nMonth as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 16usize, 8u8, - { - let nYear: u16 = unsafe { ::std::mem::transmute(nYear) }; - nYear as u64 - }, - ); + >({ + let nYear: u16 = unsafe { ::std::mem::transmute(nYear) }; + nYear as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 24usize, 8u8, - { - let byte: u8 = unsafe { ::std::mem::transmute(byte) }; - byte as u64 - }, - ); + >({ + let byte: u8 = unsafe { ::std::mem::transmute(byte) }; + byte as u64 + }); __bindgen_bitfield_unit } } @@ -1322,13 +1688,15 @@ const _: () = { impl Date3 { #[inline] pub fn nWeekDay(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 3u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 3u8>() as u16) + } } #[inline] pub fn set_nWeekDay(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 3u8, val as u64) + self._bitfield_1.set_const::<0usize, 3u8>(val as u64) } } #[inline] @@ -1337,8 +1705,10 @@ impl Date3 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 3u8) - as u16, + >>::raw_get_const::< + 0usize, + 3u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -1348,23 +1718,23 @@ impl Date3 { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 3u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn nMonthDay(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(3usize, 6u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<3usize, 6u8>() as u16) + } } #[inline] pub fn set_nMonthDay(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(3usize, 6u8, val as u64) + self._bitfield_1.set_const::<3usize, 6u8>(val as u64) } } #[inline] @@ -1373,8 +1743,10 @@ impl Date3 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 3usize, 6u8) - as u16, + >>::raw_get_const::< + 3usize, + 6u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -1384,23 +1756,23 @@ impl Date3 { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 3usize, 6u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn nMonth(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(9usize, 5u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<9usize, 5u8>() as u16) + } } #[inline] pub fn set_nMonth(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(9usize, 5u8, val as u64) + self._bitfield_1.set_const::<9usize, 5u8>(val as u64) } } #[inline] @@ -1409,8 +1781,10 @@ impl Date3 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 9usize, 5u8) - as 
u16, + >>::raw_get_const::< + 9usize, + 5u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -1420,23 +1794,23 @@ impl Date3 { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 9usize, 5u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn nYear(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(16usize, 8u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<16usize, 8u8>() as u16) + } } #[inline] pub fn set_nYear(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(16usize, 8u8, val as u64) + self._bitfield_1.set_const::<16usize, 8u8>(val as u64) } } #[inline] @@ -1445,8 +1819,10 @@ impl Date3 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 16usize, 8u8) - as u16, + >>::raw_get_const::< + 16usize, + 8u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -1456,12 +1832,10 @@ impl Date3 { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 16usize, 8u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -1473,41 +1847,37 @@ impl Date3 { ) -> __BindgenBitfieldUnit<[u8; 3usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 3usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 3u8, - { - let nWeekDay: u16 = unsafe { ::std::mem::transmute(nWeekDay) }; - nWeekDay as u64 - }, - ); + >({ + let nWeekDay: u16 = unsafe { ::std::mem::transmute(nWeekDay) }; + nWeekDay as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 3usize, 6u8, - { - let nMonthDay: u16 = unsafe { ::std::mem::transmute(nMonthDay) }; - nMonthDay as u64 - }, - ); + >({ + let nMonthDay: u16 = unsafe { ::std::mem::transmute(nMonthDay) }; + nMonthDay as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 9usize, 5u8, - { - let nMonth: u16 = unsafe { ::std::mem::transmute(nMonth) }; - nMonth as u64 - }, - ); + >({ + let nMonth: u16 = unsafe { ::std::mem::transmute(nMonth) }; + nMonth as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 16usize, 8u8, - { - let nYear: u16 = unsafe { ::std::mem::transmute(nYear) }; - nYear as u64 - }, - ); + >({ + let nYear: u16 = unsafe { ::std::mem::transmute(nYear) }; + nYear as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/bitfield_align_2.rs b/bindgen-tests/tests/expectations/tests/bitfield_align_2.rs index b71bba18ad..9d5591e5a5 100644 --- a/bindgen-tests/tests/expectations/tests/bitfield_align_2.rs +++ b/bindgen-tests/tests/expectations/tests/bitfield_align_2.rs @@ -76,17 +76,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = 
"big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -96,16 +109,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -116,15 +144,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -134,15 +184,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as 
usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
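+ // Worked example (values taken from the `TaggedPtr` accessors below): for the
+ // `ptr` field, `set_const::<2usize, 62u8>` gives start_byte = 2 / 8 = 0,
+ // bit_shift = 2 and bytes_needed = (62 + 2 + 7) / 8 = 8, so all eight storage
+ // bytes are rewritten. On a 64-bit little-endian target the native-word path
+ // applies (62 + 2 <= usize::BITS) with field_mask = ((1usize << 62) - 1) << 2,
+ // which leaves the two low `tag` bits untouched; on a 32-bit target the u64
+ // fallback takes the `>= 64` branch and computes the mask as !0u64 << 2.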
+ #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
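+ // Unlike `get_const` above, this getter reads the storage bytes through a raw
+ // `*const Self` (via `core::ptr::addr_of!`) rather than through `&self`, so it
+ // can be used where creating a reference would be invalid, e.g. when the
+ // bitfield unit lives inside a `#[repr(packed)]` struct.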
+ #[inline] + pub const unsafe fn raw_get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize.
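+ // Safety: as with the non-const `raw_set` above, `this` must point to a
+ // valid, writable bitfield unit. Each affected byte is read, masked and
+ // written back, so bits belonging to neighbouring fields are preserved.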
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } @@ -177,13 +520,15 @@ impl Default for TaggedPtr { impl TaggedPtr { #[inline] pub fn tag(&self) -> MyEnum { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 2u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 2u8>() as u32) + } } #[inline] pub fn set_tag(&mut self, val: MyEnum) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 2u8, val as u64) + self._bitfield_1.set_const::<0usize, 2u8>(val as u64) } } #[inline] @@ -192,8 +537,10 @@ impl TaggedPtr { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 8usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 2u8) - as u32, + >>::raw_get_const::< + 0usize, + 2u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -203,23 +550,23 @@ impl TaggedPtr { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 8usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 2u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn ptr(&self) -> ::std::os::raw::c_long { - unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 62u8) as u64) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<2usize, 62u8>() as u64) + } } #[inline] pub fn set_ptr(&mut self, val: ::std::os::raw::c_long) { unsafe { let val: u64 = ::std::mem::transmute(val); - 
self._bitfield_1.set(2usize, 62u8, val as u64) + self._bitfield_1.set_const::<2usize, 62u8>(val as u64) } } #[inline] @@ -228,8 +575,10 @@ impl TaggedPtr { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 8usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 2usize, 62u8) - as u64, + >>::raw_get_const::< + 2usize, + 62u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u64, ) } } @@ -239,12 +588,10 @@ impl TaggedPtr { let val: u64 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 8usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 2usize, 62u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -254,23 +601,21 @@ impl TaggedPtr { ) -> __BindgenBitfieldUnit<[u8; 8usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 2u8, - { - let tag: u32 = unsafe { ::std::mem::transmute(tag) }; - tag as u64 - }, - ); + >({ + let tag: u32 = unsafe { ::std::mem::transmute(tag) }; + tag as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 2usize, 62u8, - { - let ptr: u64 = unsafe { ::std::mem::transmute(ptr) }; - ptr as u64 - }, - ); + >({ + let ptr: u64 = unsafe { ::std::mem::transmute(ptr) }; + ptr as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/bitfield_method_mangling.rs b/bindgen-tests/tests/expectations/tests/bitfield_method_mangling.rs index 35117c74b6..a4b2dc009b 100644 --- a/bindgen-tests/tests/expectations/tests/bitfield_method_mangling.rs +++ b/bindgen-tests/tests/expectations/tests/bitfield_method_mangling.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { 
*storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
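+ // Worked example (values from the `mach_msg_type_descriptor_t` accessors
+ // below): `get_const::<24usize, 8u8>` for `type_` gives start_byte = 3,
+ // bit_shift = 0 and bytes_needed = 1, i.e. a single byte load, while
+ // `get_const::<0usize, 24u8>` for `pad3` assembles storage[0], storage[1]
+ // and storage[2] and masks the result with (1 << 24) - 1.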
+ #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + 
self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
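+ // BIT_WIDTH and bit_shift are compile-time constants for a given
+ // instantiation, so the branch below between the native-word (usize) path and
+ // the u64 fallback can be resolved at compile time; in the u64 arm the
+ // `>= 64` check on field_mask avoids the overflowing shift `1u64 << 64`.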
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } @@ -163,13 +506,15 @@ const _: () = { impl mach_msg_type_descriptor_t { #[inline] pub fn pad3(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 24u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 24u8>() as u32) + } } #[inline] pub fn set_pad3(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 24u8, val as u64) + self._bitfield_1.set_const::<0usize, 24u8>(val as u64) } } #[inline] @@ -178,8 +523,10 @@ impl mach_msg_type_descriptor_t { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 24u8) - as u32, + >>::raw_get_const::< + 0usize, + 24u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -189,23 +536,23 @@ impl mach_msg_type_descriptor_t { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 24u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn type_(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(24usize, 8u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<24usize, 8u8>() as u32) + } } #[inline] pub fn set_type(&mut self, 
val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(24usize, 8u8, val as u64) + self._bitfield_1.set_const::<24usize, 8u8>(val as u64) } } #[inline] @@ -214,8 +561,10 @@ impl mach_msg_type_descriptor_t { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 24usize, 8u8) - as u32, + >>::raw_get_const::< + 24usize, + 8u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -225,12 +574,10 @@ impl mach_msg_type_descriptor_t { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 24usize, 8u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -240,23 +587,21 @@ impl mach_msg_type_descriptor_t { ) -> __BindgenBitfieldUnit<[u8; 4usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 24u8, - { - let pad3: u32 = unsafe { ::std::mem::transmute(pad3) }; - pad3 as u64 - }, - ); + >({ + let pad3: u32 = unsafe { ::std::mem::transmute(pad3) }; + pad3 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 24usize, 8u8, - { - let type_: u32 = unsafe { ::std::mem::transmute(type_) }; - type_ as u64 - }, - ); + >({ + let type_: u32 = unsafe { ::std::mem::transmute(type_) }; + type_ as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/bitfield_pack_offset.rs b/bindgen-tests/tests/expectations/tests/bitfield_pack_offset.rs index d654e25b27..2b0081ae56 100644 --- a/bindgen-tests/tests/expectations/tests/bitfield_pack_offset.rs +++ b/bindgen-tests/tests/expectations/tests/bitfield_pack_offset.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,17 +108,32 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + 
i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
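+ // Worked example (values from the accessors of `A` below): the `size` field
+ // uses `get_const::<14usize, 10u8>`, so start_byte = 1, bit_shift = 6 and
+ // bytes_needed = (10 + 6 + 7) / 8 = 2; on little-endian the two bytes are
+ // combined as storage[1] | storage[2] << 8, shifted right by 6 and masked
+ // with (1 << 10) - 1.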
+ #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift } else { - i + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + 
self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
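+ // On big-endian targets each byte is passed through `reverse_bits` on load so
+ // that the same shift-and-mask arithmetic as on little-endian applies, and
+ // the updated byte is bit-reversed again before being stored back.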
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } @@ -176,13 +519,15 @@ impl Default for A { impl A { #[inline] pub fn firmness(&self) -> ::std::os::raw::c_uchar { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 4u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 4u8>() as u8) + } } #[inline] pub fn set_firmness(&mut self, val: ::std::os::raw::c_uchar) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 4u8, val as u64) + self._bitfield_1.set_const::<0usize, 4u8>(val as u64) } } #[inline] @@ -191,7 +536,10 @@ impl A { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 4u8) as u8, + >>::raw_get_const::< + 0usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -201,23 +549,23 @@ impl A { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn color(&self) -> ::std::os::raw::c_uchar { - unsafe { ::std::mem::transmute(self._bitfield_1.get(4usize, 4u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<4usize, 4u8>() as u8) + } } #[inline] pub fn set_color(&mut self, val: ::std::os::raw::c_uchar) { unsafe { let val: u8 = 
::std::mem::transmute(val); - self._bitfield_1.set(4usize, 4u8, val as u64) + self._bitfield_1.set_const::<4usize, 4u8>(val as u64) } } #[inline] @@ -226,7 +574,10 @@ impl A { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 4usize, 4u8) as u8, + >>::raw_get_const::< + 4usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -236,23 +587,23 @@ impl A { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 4usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn weedsBonus(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(8usize, 3u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<8usize, 3u8>() as u16) + } } #[inline] pub fn set_weedsBonus(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(8usize, 3u8, val as u64) + self._bitfield_1.set_const::<8usize, 3u8>(val as u64) } } #[inline] @@ -261,8 +612,10 @@ impl A { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 8usize, 3u8) - as u16, + >>::raw_get_const::< + 8usize, + 3u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -272,23 +625,23 @@ impl A { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 8usize, 3u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn pestsBonus(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(11usize, 3u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<11usize, 3u8>() as u16) + } } #[inline] pub fn set_pestsBonus(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(11usize, 3u8, val as u64) + self._bitfield_1.set_const::<11usize, 3u8>(val as u64) } } #[inline] @@ -297,8 +650,10 @@ impl A { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 11usize, 3u8) - as u16, + >>::raw_get_const::< + 11usize, + 3u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -308,23 +663,23 @@ impl A { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 11usize, 3u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn size(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(14usize, 10u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<14usize, 10u8>() as u16) + } } #[inline] pub fn set_size(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(14usize, 10u8, val as u64) + self._bitfield_1.set_const::<14usize, 10u8>(val as u64) } } #[inline] @@ -333,8 +688,10 @@ impl A { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 14usize, 10u8) - as u16, + >>::raw_get_const::< + 14usize, + 10u8, + 
>(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -344,12 +701,10 @@ impl A { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 14usize, 10u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -362,61 +717,58 @@ impl A { ) -> __BindgenBitfieldUnit<[u8; 3usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 3usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 4u8, - { - let firmness: u8 = unsafe { ::std::mem::transmute(firmness) }; - firmness as u64 - }, - ); + >({ + let firmness: u8 = unsafe { ::std::mem::transmute(firmness) }; + firmness as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 4usize, 4u8, - { - let color: u8 = unsafe { ::std::mem::transmute(color) }; - color as u64 - }, - ); + >({ + let color: u8 = unsafe { ::std::mem::transmute(color) }; + color as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 8usize, 3u8, - { - let weedsBonus: u16 = unsafe { ::std::mem::transmute(weedsBonus) }; - weedsBonus as u64 - }, - ); + >({ + let weedsBonus: u16 = unsafe { ::std::mem::transmute(weedsBonus) }; + weedsBonus as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 11usize, 3u8, - { - let pestsBonus: u16 = unsafe { ::std::mem::transmute(pestsBonus) }; - pestsBonus as u64 - }, - ); + >({ + let pestsBonus: u16 = unsafe { ::std::mem::transmute(pestsBonus) }; + pestsBonus as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 14usize, 10u8, - { - let size: u16 = unsafe { ::std::mem::transmute(size) }; - size as u64 - }, - ); + >({ + let size: u16 = unsafe { ::std::mem::transmute(size) }; + size as u64 + }); __bindgen_bitfield_unit } #[inline] pub fn minYield(&self) -> ::std::os::raw::c_uchar { - unsafe { ::std::mem::transmute(self._bitfield_2.get(0usize, 4u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_2.get_const::<0usize, 4u8>() as u8) + } } #[inline] pub fn set_minYield(&mut self, val: ::std::os::raw::c_uchar) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_2.set(0usize, 4u8, val as u64) + self._bitfield_2.set_const::<0usize, 4u8>(val as u64) } } #[inline] @@ -425,7 +777,10 @@ impl A { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_2), 0usize, 4u8) as u8, + >>::raw_get_const::< + 0usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_2)) as u8, ) } } @@ -435,23 +790,23 @@ impl A { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_2), + >>::raw_set_const::< 0usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_2), val as u64) } } #[inline] pub fn waterBonus(&self) -> ::std::os::raw::c_uchar { - unsafe { ::std::mem::transmute(self._bitfield_2.get(4usize, 4u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_2.get_const::<4usize, 4u8>() as u8) + } } #[inline] pub fn set_waterBonus(&mut self, val: ::std::os::raw::c_uchar) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_2.set(4usize, 4u8, val as u64) + self._bitfield_2.set_const::<4usize, 4u8>(val as u64) } } #[inline] @@ -460,7 +815,10 @@ impl A { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_2), 4usize, 4u8) as u8, + >>::raw_get_const::< + 4usize, + 4u8, + 
>(::std::ptr::addr_of!((*this)._bitfield_2)) as u8, ) } } @@ -470,12 +828,10 @@ impl A { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_2), + >>::raw_set_const::< 4usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_2), val as u64) } } #[inline] @@ -485,23 +841,21 @@ impl A { ) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 4u8, - { - let minYield: u8 = unsafe { ::std::mem::transmute(minYield) }; - minYield as u64 - }, - ); + >({ + let minYield: u8 = unsafe { ::std::mem::transmute(minYield) }; + minYield as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 4usize, 4u8, - { - let waterBonus: u8 = unsafe { ::std::mem::transmute(waterBonus) }; - waterBonus as u64 - }, - ); + >({ + let waterBonus: u8 = unsafe { ::std::mem::transmute(waterBonus) }; + waterBonus as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/bitfield_pragma_packed.rs b/bindgen-tests/tests/expectations/tests/bitfield_pragma_packed.rs index 6f9adcb5ab..fd16e0d41d 100644 --- a/bindgen-tests/tests/expectations/tests/bitfield_pragma_packed.rs +++ b/bindgen-tests/tests/expectations/tests/bitfield_pragma_packed.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,17 +108,32 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -115,15 +143,37 @@ where debug_assert!( 
(bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
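+ // `get_const` and `raw_get_const` are `const fn`, which is why the byte loops
+ // use `while` instead of `for` (`for` loops are not allowed in `const fn` on
+ // stable Rust); as a consequence, field reads can also be evaluated in const
+ // contexts.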
+ #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift } else { - i + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + 
self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
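+ ///
+ /// # Safety
+ /// `this` must be non-null, properly aligned, and point to an initialized
+ /// bitfield unit whose storage is valid for reads and writes.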
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } @@ -158,13 +501,15 @@ const _: () = { impl Struct { #[inline] pub fn a(&self) -> ::std::os::raw::c_uchar { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u8) + } } #[inline] pub fn set_a(&mut self, val: ::std::os::raw::c_uchar) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -173,7 +518,10 @@ impl Struct { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) as u8, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -183,23 +531,23 @@ impl Struct { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b(&self) -> ::std::os::raw::c_uchar { - unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 1u8>() as u8) + } } #[inline] pub fn set_b(&mut self, val: ::std::os::raw::c_uchar) { unsafe { let val: u8 = ::std::mem::transmute(val); - 
self._bitfield_1.set(1usize, 1u8, val as u64) + self._bitfield_1.set_const::<1usize, 1u8>(val as u64) } } #[inline] @@ -208,7 +556,10 @@ impl Struct { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 1u8) as u8, + >>::raw_get_const::< + 1usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -218,23 +569,23 @@ impl Struct { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn c(&self) -> ::std::os::raw::c_uchar { - unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 6u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<2usize, 6u8>() as u8) + } } #[inline] pub fn set_c(&mut self, val: ::std::os::raw::c_uchar) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(2usize, 6u8, val as u64) + self._bitfield_1.set_const::<2usize, 6u8>(val as u64) } } #[inline] @@ -243,7 +594,10 @@ impl Struct { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 2usize, 6u8) as u8, + >>::raw_get_const::< + 2usize, + 6u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -253,23 +607,23 @@ impl Struct { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 2usize, 6u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn d(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(8usize, 16u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<8usize, 16u8>() as u16) + } } #[inline] pub fn set_d(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(8usize, 16u8, val as u64) + self._bitfield_1.set_const::<8usize, 16u8>(val as u64) } } #[inline] @@ -278,8 +632,10 @@ impl Struct { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 8usize, 16u8) - as u16, + >>::raw_get_const::< + 8usize, + 16u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -289,23 +645,23 @@ impl Struct { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 8usize, 16u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn e(&self) -> ::std::os::raw::c_uchar { - unsafe { ::std::mem::transmute(self._bitfield_1.get(24usize, 8u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<24usize, 8u8>() as u8) + } } #[inline] pub fn set_e(&mut self, val: ::std::os::raw::c_uchar) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(24usize, 8u8, val as u64) + self._bitfield_1.set_const::<24usize, 8u8>(val as u64) } } #[inline] @@ -314,8 +670,10 @@ impl Struct { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 24usize, 8u8) - as u8, + >>::raw_get_const::< + 24usize, + 8u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -325,12 +683,10 @@ impl Struct { let val: 
u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 24usize, 8u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -343,50 +699,45 @@ impl Struct { ) -> __BindgenBitfieldUnit<[u8; 4usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let a: u8 = unsafe { ::std::mem::transmute(a) }; - a as u64 - }, - ); + >({ + let a: u8 = unsafe { ::std::mem::transmute(a) }; + a as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 1u8, - { - let b: u8 = unsafe { ::std::mem::transmute(b) }; - b as u64 - }, - ); + >({ + let b: u8 = unsafe { ::std::mem::transmute(b) }; + b as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 2usize, 6u8, - { - let c: u8 = unsafe { ::std::mem::transmute(c) }; - c as u64 - }, - ); + >({ + let c: u8 = unsafe { ::std::mem::transmute(c) }; + c as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 8usize, 16u8, - { - let d: u16 = unsafe { ::std::mem::transmute(d) }; - d as u64 - }, - ); + >({ + let d: u16 = unsafe { ::std::mem::transmute(d) }; + d as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 24usize, 8u8, - { - let e: u8 = unsafe { ::std::mem::transmute(e) }; - e as u64 - }, - ); + >({ + let e: u8 = unsafe { ::std::mem::transmute(e) }; + e as u64 + }); __bindgen_bitfield_unit } } @@ -404,13 +755,15 @@ const _: () = { impl Inner { #[inline] pub fn a(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 16u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 16u8>() as u16) + } } #[inline] pub fn set_a(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 16u8, val as u64) + self._bitfield_1.set_const::<0usize, 16u8>(val as u64) } } #[inline] @@ -419,8 +772,10 @@ impl Inner { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 16u8) - as u16, + >>::raw_get_const::< + 0usize, + 16u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -430,23 +785,23 @@ impl Inner { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 16u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(16usize, 16u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<16usize, 16u8>() as u16) + } } #[inline] pub fn set_b(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(16usize, 16u8, val as u64) + self._bitfield_1.set_const::<16usize, 16u8>(val as u64) } } #[inline] @@ -455,8 +810,10 @@ impl Inner { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 16usize, 16u8) - as u16, + >>::raw_get_const::< + 16usize, + 16u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -466,12 +823,10 @@ impl Inner { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + 
>>::raw_set_const::< 16usize, 16u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -481,23 +836,21 @@ impl Inner { ) -> __BindgenBitfieldUnit<[u8; 4usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 16u8, - { - let a: u16 = unsafe { ::std::mem::transmute(a) }; - a as u64 - }, - ); + >({ + let a: u16 = unsafe { ::std::mem::transmute(a) }; + a as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 16usize, 16u8, - { - let b: u16 = unsafe { ::std::mem::transmute(b) }; - b as u64 - }, - ); + >({ + let b: u16 = unsafe { ::std::mem::transmute(b) }; + b as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/blocklist_bitfield_unit.rs b/bindgen-tests/tests/expectations/tests/blocklist_bitfield_unit.rs index 77c263e3cc..91d34b9a64 100644 --- a/bindgen-tests/tests/expectations/tests/blocklist_bitfield_unit.rs +++ b/bindgen-tests/tests/expectations/tests/blocklist_bitfield_unit.rs @@ -19,13 +19,15 @@ const _: () = { impl C { #[inline] pub fn b1(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u32) + } } #[inline] pub fn set_b1(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -34,8 +36,10 @@ impl C { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) - as u32, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -45,23 +49,23 @@ impl C { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b2(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 1u8>() as u32) + } } #[inline] pub fn set_b2(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(1usize, 1u8, val as u64) + self._bitfield_1.set_const::<1usize, 1u8>(val as u64) } } #[inline] @@ -70,8 +74,10 @@ impl C { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 1u8) - as u32, + >>::raw_get_const::< + 1usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -81,12 +87,10 @@ impl C { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -96,23 +100,21 @@ impl C { ) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let b1: u32 = unsafe { ::std::mem::transmute(b1) }; - b1 as u64 - }, - ); + >({ + let b1: u32 = unsafe { ::std::mem::transmute(b1) 
}; + b1 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 1u8, - { - let b2: u32 = unsafe { ::std::mem::transmute(b2) }; - b2 as u64 - }, - ); + >({ + let b2: u32 = unsafe { ::std::mem::transmute(b2) }; + b2 as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/default_visibility_crate.rs b/bindgen-tests/tests/expectations/tests/default_visibility_crate.rs index aeefb2e0f9..4525a07711 100644 --- a/bindgen-tests/tests/expectations/tests/default_visibility_crate.rs +++ b/bindgen-tests/tests/expectations/tests/default_visibility_crate.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 
0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
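+ /// Returns the field's bits zero-extended to `u64`.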
+ #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift } else { - i + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + 
self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
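+ ///
+ /// # Safety
+ /// `this` must be non-null, properly aligned, and point to an initialized
+ /// bitfield unit whose storage is valid for reads and writes.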
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } @@ -159,13 +502,15 @@ pub struct Color { impl Color { #[inline] pub(crate) fn r(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u8) + } } #[inline] pub(crate) fn set_r(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -174,7 +519,10 @@ impl Color { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) as u8, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -184,23 +532,23 @@ impl Color { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub(crate) fn g(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 1u8>() as u8) + } } #[inline] pub(crate) fn set_g(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = 
::std::mem::transmute(val); - self._bitfield_1.set(1usize, 1u8, val as u64) + self._bitfield_1.set_const::<1usize, 1u8>(val as u64) } } #[inline] @@ -209,7 +557,10 @@ impl Color { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 1u8) as u8, + >>::raw_get_const::< + 1usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -219,23 +570,23 @@ impl Color { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub(crate) fn b(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<2usize, 1u8>() as u8) + } } #[inline] pub(crate) fn set_b(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(2usize, 1u8, val as u64) + self._bitfield_1.set_const::<2usize, 1u8>(val as u64) } } #[inline] @@ -244,7 +595,10 @@ impl Color { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 2usize, 1u8) as u8, + >>::raw_get_const::< + 2usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -254,12 +608,10 @@ impl Color { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 2usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -270,32 +622,29 @@ impl Color { ) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let r: u8 = unsafe { ::std::mem::transmute(r) }; - r as u64 - }, - ); + >({ + let r: u8 = unsafe { ::std::mem::transmute(r) }; + r as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 1u8, - { - let g: u8 = unsafe { ::std::mem::transmute(g) }; - g as u64 - }, - ); + >({ + let g: u8 = unsafe { ::std::mem::transmute(g) }; + g as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 2usize, 1u8, - { - let b: u8 = unsafe { ::std::mem::transmute(b) }; - b as u64 - }, - ); + >({ + let b: u8 = unsafe { ::std::mem::transmute(b) }; + b as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/default_visibility_private.rs b/bindgen-tests/tests/expectations/tests/default_visibility_private.rs index dceed75e36..646eadfed1 100644 --- a/bindgen-tests/tests/expectations/tests/default_visibility_private.rs +++ b/bindgen-tests/tests/expectations/tests/default_visibility_private.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 
0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 
64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
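+ ///
+ /// Illustrative call shape, mirroring the generated setters below (the
+ /// `unit` value here is a hypothetical single-byte bitfield unit):
+ /// ```ignore
+ /// // Store a one-bit field at bit offset 1; excess high bits of the
+ /// // argument are masked off before writing.
+ /// unit.set_const::<1usize, 1u8>(1);
+ /// ```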
+ #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift } else { - i + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
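+ ///
+ /// # Safety
+ /// `this` must be non-null, properly aligned, and point to an initialized
+ /// bitfield unit whose storage is valid for reads.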
+ #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
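+ ///
+ /// # Safety
+ /// `this` must be non-null, properly aligned, and point to an initialized
+ /// bitfield unit whose storage is valid for reads and writes.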
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } @@ -159,13 +502,15 @@ pub struct Color { impl Color { #[inline] fn r(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u8) + } } #[inline] fn set_r(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -174,7 +519,10 @@ impl Color { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) as u8, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -184,23 +532,23 @@ impl Color { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] fn g(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 1u8>() as u8) + } } #[inline] fn set_g(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - 
self._bitfield_1.set(1usize, 1u8, val as u64) + self._bitfield_1.set_const::<1usize, 1u8>(val as u64) } } #[inline] @@ -209,7 +557,10 @@ impl Color { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 1u8) as u8, + >>::raw_get_const::< + 1usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -219,23 +570,23 @@ impl Color { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] fn b(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<2usize, 1u8>() as u8) + } } #[inline] fn set_b(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(2usize, 1u8, val as u64) + self._bitfield_1.set_const::<2usize, 1u8>(val as u64) } } #[inline] @@ -244,7 +595,10 @@ impl Color { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 2usize, 1u8) as u8, + >>::raw_get_const::< + 2usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -254,12 +608,10 @@ impl Color { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 2usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -270,32 +622,29 @@ impl Color { ) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let r: u8 = unsafe { ::std::mem::transmute(r) }; - r as u64 - }, - ); + >({ + let r: u8 = unsafe { ::std::mem::transmute(r) }; + r as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 1u8, - { - let g: u8 = unsafe { ::std::mem::transmute(g) }; - g as u64 - }, - ); + >({ + let g: u8 = unsafe { ::std::mem::transmute(g) }; + g as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 2usize, 1u8, - { - let b: u8 = unsafe { ::std::mem::transmute(b) }; - b as u64 - }, - ); + >({ + let b: u8 = unsafe { ::std::mem::transmute(b) }; + b as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/default_visibility_private_respects_cxx_access_spec.rs b/bindgen-tests/tests/expectations/tests/default_visibility_private_respects_cxx_access_spec.rs index f43be84bb0..ba8ff7d004 100644 --- a/bindgen-tests/tests/expectations/tests/default_visibility_private_respects_cxx_access_spec.rs +++ b/bindgen-tests/tests/expectations/tests/default_visibility_private_respects_cxx_access_spec.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if 
cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask 
= if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
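+ /// Bits of `val` above `BIT_WIDTH` are masked off before the store.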
+ #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift } else { - i + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
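The raw getter defined next reads through `core::ptr::addr_of!` rather than `&self`, so no intermediate reference to the storage is ever created; that matters when the struct pointer comes from C and may not uphold the invariants a Rust reference demands. A self-contained illustration of the pointer dance (`Unit` and `raw_read_byte` are hypothetical names, not part of the patch):

use core::ptr;

#[repr(C)]
struct Unit {
    storage: [u8; 2],
}

// Read one storage byte through a raw struct pointer without ever
// materializing a `&Unit` or `&[u8; 2]`.
unsafe fn raw_read_byte(this: *const Unit, index: usize) -> u8 {
    let storage_ptr = ptr::addr_of!((*this).storage) as *const u8;
    *storage_ptr.add(index)
}

fn main() {
    let unit = Unit { storage: [0xAB, 0xCD] };
    let p: *const Unit = &unit;
    assert_eq!(unsafe { raw_read_byte(p, 1) }, 0xCD);
}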
+ #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
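Both endian branches in the raw setter below rely on two properties of `reverse_bits`: it is its own inverse, and it commutes with shifting (reversing then shifting right equals shifting left then reversing). That is what lets the code run LSB-first arithmetic over MSB-first byte contents and convert back at the end. A standalone check of both properties, independent of the patch:

fn main() {
    // reverse_bits is an involution.
    for b in [0u8, 0b1000_0001, 0xFF, 0b0101_1010] {
        assert_eq!(b.reverse_bits().reverse_bits(), b);
    }
    // Reversing commutes with shifting: moving a width-bit value to the top
    // and reversing equals reversing first and shifting down.
    let width = 5u32;
    let val: u64 = 0b10110;
    assert_eq!(
        (val << (64 - width)).reverse_bits(),
        val.reverse_bits() >> (64 - width),
    );
}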
+ #[inline] + pub unsafe fn raw_set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::<u8>(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift } else { - i + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } @@ -159,13 +502,15 @@ pub struct Color { impl Color { #[inline] pub fn r(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u8) + } } #[inline] pub fn set_r(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -174,7 +519,10 @@ impl Color { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) as u8, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -184,23 +532,23 @@ impl Color { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn g(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 1u8>() as u8) + } } #[inline] pub fn set_g(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - 
self._bitfield_1.set(1usize, 1u8, val as u64) + self._bitfield_1.set_const::<1usize, 1u8>(val as u64) } } #[inline] @@ -209,7 +557,10 @@ impl Color { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 1u8) as u8, + >>::raw_get_const::< + 1usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -219,23 +570,23 @@ impl Color { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<2usize, 1u8>() as u8) + } } #[inline] pub fn set_b(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(2usize, 1u8, val as u64) + self._bitfield_1.set_const::<2usize, 1u8>(val as u64) } } #[inline] @@ -244,7 +595,10 @@ impl Color { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 2usize, 1u8) as u8, + >>::raw_get_const::< + 2usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -254,12 +608,10 @@ impl Color { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 2usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -270,32 +622,29 @@ impl Color { ) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let r: u8 = unsafe { ::std::mem::transmute(r) }; - r as u64 - }, - ); + >({ + let r: u8 = unsafe { ::std::mem::transmute(r) }; + r as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 1u8, - { - let g: u8 = unsafe { ::std::mem::transmute(g) }; - g as u64 - }, - ); + >({ + let g: u8 = unsafe { ::std::mem::transmute(g) }; + g as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 2usize, 1u8, - { - let b: u8 = unsafe { ::std::mem::transmute(b) }; - b as u64 - }, - ); + >({ + let b: u8 = unsafe { ::std::mem::transmute(b) }; + b as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/derive-bitfield-method-same-name.rs b/bindgen-tests/tests/expectations/tests/derive-bitfield-method-same-name.rs index 7fa8bc41ab..7b291fa26f 100644 --- a/bindgen-tests/tests/expectations/tests/derive-bitfield-method-same-name.rs +++ b/bindgen-tests/tests/expectations/tests/derive-bitfield-method-same-name.rs @@ -75,16 +75,29 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= 
(storage[start_byte + i].reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -95,17 +108,32 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + 
} else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } } else { - i + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
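The recurring `if bit_width < 64` / `if BIT_WIDTH < 64` guards around `(1u64 << bit_width) - 1` are load-bearing: shifting a `u64` by 64 is an overflow in Rust, a panic in debug builds. A branch-free formulation of the same mask, offered only as a sketch of the invariant; `low_mask` is not part of the patch:

// All ones below `width`, for any width from 0 to 64 inclusive.
fn low_mask(width: u32) -> u64 {
    // checked_shl yields None once the shift reaches the type's bit count,
    // which is exactly the all-ones case.
    1u64.checked_shl(width).map_or(!0, |v| v - 1)
}

fn main() {
    assert_eq!(low_mask(0), 0);
    assert_eq!(low_mask(7), 0b0111_1111);
    assert_eq!(low_mask(64), u64::MAX);
}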
+ #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
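`bytes_needed` is a plain ceiling division: a field `width` bits wide that starts `shift` bits into its first byte touches `ceil((width + shift) / 8)` storage bytes. A few spot checks of the `(width + shift + 7) / 8` formula used throughout:

fn bytes_needed(width: usize, shift: usize) -> usize {
    (width + shift + 7) / 8
}

fn main() {
    assert_eq!(bytes_needed(1, 0), 1); // single byte-aligned bit
    assert_eq!(bytes_needed(8, 0), 1); // exactly one byte
    assert_eq!(bytes_needed(8, 1), 2); // eight bits straddling a boundary
    assert_eq!(bytes_needed(33, 7), 5); // 40 bits in total
}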
+ #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
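The raw setter that follows merges into each byte through a `*mut u8`, mirroring the by-reference path: clear the field's bits with `!byte_mask`, then OR in the new bits. A compact demonstration that the merge leaves neighbouring bits intact (`Unit` and `raw_write_bits` are illustrative names only):

use core::ptr;

#[repr(C)]
struct Unit {
    storage: [u8; 1],
}

unsafe fn raw_write_bits(this: *mut Unit, byte_mask: u8, byte_val: u8) {
    let byte_ptr = ptr::addr_of_mut!((*this).storage) as *mut u8;
    // Clear only the masked bits, then set them from the new value.
    *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask);
}

fn main() {
    let mut unit = Unit { storage: [0b1111_0000] };
    unsafe { raw_write_bits(&mut unit, 0b0000_0111, 0b0000_0101) };
    assert_eq!(unit.storage[0], 0b1111_0101); // high nibble untouched
}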
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } @@ -185,13 +528,15 @@ impl Default for Foo { impl Foo { #[inline] pub fn type__bindgen_bitfield(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 3u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 3u8>() as u8) + } } #[inline] pub fn set_type__bindgen_bitfield(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 3u8, val as u64) + self._bitfield_1.set_const::<0usize, 3u8>(val as u64) } } #[inline] @@ -202,7 +547,10 @@ impl Foo { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 3u8) as u8, + >>::raw_get_const::< + 0usize, + 3u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -215,12 +563,10 @@ impl Foo { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 3u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -229,16 +575,15 @@ impl Foo { ) -> __BindgenBitfieldUnit<[u8; 2usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 2usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 3u8, - 
{ - let type__bindgen_bitfield: u8 = unsafe { - ::std::mem::transmute(type__bindgen_bitfield) - }; - type__bindgen_bitfield as u64 - }, - ); + >({ + let type__bindgen_bitfield: u8 = unsafe { + ::std::mem::transmute(type__bindgen_bitfield) + }; + type__bindgen_bitfield as u64 + }); __bindgen_bitfield_unit } #[inline] diff --git a/bindgen-tests/tests/expectations/tests/derive-debug-bitfield-1-51.rs b/bindgen-tests/tests/expectations/tests/derive-debug-bitfield-1-51.rs index 87cbb7346c..33752b15fa 100644 --- a/bindgen-tests/tests/expectations/tests/derive-debug-bitfield-1-51.rs +++ b/bindgen-tests/tests/expectations/tests/derive-debug-bitfield-1-51.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + 
((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
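The `get_const` defined next dispatches on `BIT_WIDTH as usize + bit_shift <= usize::BITS as usize`: when the shifted field fits in one native word it accumulates in `usize`, which keeps narrow fields out of software-emulated 64-bit arithmetic on 32-bit targets. The inequality itself, pulled out for inspection (`fits_native_word` is a hypothetical helper):

fn fits_native_word(bit_width: u8, bit_shift: usize) -> bool {
    bit_width as usize + bit_shift <= usize::BITS as usize
}

fn main() {
    assert!(fits_native_word(8, 3)); // 11 bits fit in any native word
    assert!(!fits_native_word(64, 1)); // 65 bits fit in no native word
}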
+ #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + 
self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
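All of the `_const` methods iterate with `while` and a manual counter because `for` loops are not allowed in `const fn`; the payoff is that a read can be folded into a compile-time constant once the storage bytes are known. A hypothetical miniature with the same little-endian shape (not the generated type):

const fn le_field(storage: &[u8], bit_offset: usize, bit_width: u8) -> u64 {
    let start_byte = bit_offset / 8;
    let bit_shift = bit_offset % 8;
    let bytes_needed = (bit_width as usize + bit_shift + 7) / 8;
    let mut val = 0u64;
    let mut i = 0;
    while i < bytes_needed {
        val |= (storage[start_byte + i] as u64) << (i * 8);
        i += 1;
    }
    val >>= bit_shift;
    if bit_width < 64 {
        val &= (1u64 << bit_width) - 1;
    }
    val
}

const FLAGS: [u8; 2] = [0b0101_1001, 0b0000_0011];
const MID: u64 = le_field(&FLAGS, 3, 7); // evaluated at compile time

fn main() {
    assert_eq!(MID, 0b110_1011);
}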
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } @@ -169,13 +512,15 @@ impl Default for C { impl C { #[inline] pub fn a(&self) -> bool { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u8) + } } #[inline] pub fn set_a(&mut self, val: bool) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -184,7 +529,10 @@ impl C { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) as u8, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -194,23 +542,23 @@ impl C { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b(&self) -> bool { - unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 7u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 7u8>() as u8) + } } #[inline] pub fn set_b(&mut self, val: bool) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(1usize, 7u8, val as u64) + self._bitfield_1.set_const::<1usize, 7u8>(val 
as u64) } } #[inline] @@ -219,7 +567,10 @@ impl C { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 7u8) as u8, + >>::raw_get_const::< + 1usize, + 7u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -229,35 +580,31 @@ impl C { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 7u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn new_bitfield_1(a: bool, b: bool) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let a: u8 = unsafe { ::std::mem::transmute(a) }; - a as u64 - }, - ); + >({ + let a: u8 = unsafe { ::std::mem::transmute(a) }; + a as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 7u8, - { - let b: u8 = unsafe { ::std::mem::transmute(b) }; - b as u64 - }, - ); + >({ + let b: u8 = unsafe { ::std::mem::transmute(b) }; + b as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/derive-debug-bitfield-core.rs b/bindgen-tests/tests/expectations/tests/derive-debug-bitfield-core.rs index 937ad4ad0c..e21a809b88 100644 --- a/bindgen-tests/tests/expectations/tests/derive-debug-bitfield-core.rs +++ b/bindgen-tests/tests/expectations/tests/derive-debug-bitfield-core.rs @@ -76,17 +76,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -96,16 +109,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } 
+ if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -116,15 +144,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -134,15 +184,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + 
self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
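What the whole diff must preserve is observable behaviour: assembling whole bytes and shifting has to agree bit-for-bit with the old one-bit-at-a-time loop. A little-endian spot check with both strategies re-implemented side by side (neither function below is bindgen's; they model the old and new read paths):

// Old strategy: test every bit of the field individually.
fn get_bitwise(storage: &[u8], bit_offset: usize, bit_width: u8) -> u64 {
    let mut val = 0u64;
    for i in 0..bit_width as usize {
        let bit = bit_offset + i;
        if storage[bit / 8] & (1 << (bit % 8)) != 0 {
            val |= 1 << i;
        }
    }
    val
}

// New strategy: gather whole bytes, then shift and mask once.
// Assumes bit_width < 64 to keep the mask expression simple.
fn get_bytewise(storage: &[u8], bit_offset: usize, bit_width: u8) -> u64 {
    let start_byte = bit_offset / 8;
    let bit_shift = bit_offset % 8;
    let bytes_needed = (bit_width as usize + bit_shift + 7) / 8;
    let mut val = 0u64;
    for i in 0..bytes_needed {
        val |= (storage[start_byte + i] as u64) << (i * 8);
    }
    (val >> bit_shift) & ((1u64 << bit_width) - 1)
}

fn main() {
    let storage = [0xDE, 0xAD, 0xBE, 0xEF, 0x12];
    for offset in 0..16 {
        for width in 1..=24u8 {
            assert_eq!(
                get_bitwise(&storage, offset, width),
                get_bytewise(&storage, offset, width),
            );
        }
    }
}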
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } @@ -172,13 +515,15 @@ impl Default for C { impl C { #[inline] pub fn a(&self) -> bool { - unsafe { ::core::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u8) } + unsafe { + ::core::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u8) + } } #[inline] pub fn set_a(&mut self, val: bool) { unsafe { let val: u8 = ::core::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -187,8 +532,10 @@ impl C { ::core::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::core::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) - as u8, + >>::raw_get_const::< + 0usize, + 1u8, + >(::core::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -198,23 +545,23 @@ impl C { let val: u8 = ::core::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::core::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::core::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b(&self) -> bool { - unsafe { ::core::mem::transmute(self._bitfield_1.get(1usize, 7u8) as u8) } + unsafe { + ::core::mem::transmute(self._bitfield_1.get_const::<1usize, 7u8>() as u8) + } } #[inline] pub fn set_b(&mut self, val: bool) { unsafe { let val: u8 = ::core::mem::transmute(val); - self._bitfield_1.set(1usize, 7u8, val as u64) + 
self._bitfield_1.set_const::<1usize, 7u8>(val as u64) } } #[inline] @@ -223,8 +570,10 @@ impl C { ::core::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::core::ptr::addr_of!((*this)._bitfield_1), 1usize, 7u8) - as u8, + >>::raw_get_const::< + 1usize, + 7u8, + >(::core::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -234,35 +583,31 @@ impl C { let val: u8 = ::core::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::core::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 7u8, - val as u64, - ) + >(::core::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn new_bitfield_1(a: bool, b: bool) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let a: u8 = unsafe { ::core::mem::transmute(a) }; - a as u64 - }, - ); + >({ + let a: u8 = unsafe { ::core::mem::transmute(a) }; + a as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 7u8, - { - let b: u8 = unsafe { ::core::mem::transmute(b) }; - b as u64 - }, - ); + >({ + let b: u8 = unsafe { ::core::mem::transmute(b) }; + b as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/derive-debug-bitfield.rs b/bindgen-tests/tests/expectations/tests/derive-debug-bitfield.rs index 87cbb7346c..33752b15fa 100644 --- a/bindgen-tests/tests/expectations/tests/derive-debug-bitfield.rs +++ b/bindgen-tests/tests/expectations/tests/derive-debug-bitfield.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { 
+ val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
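+ ///
+ /// A minimal usage sketch (illustrative only; the expected value assumes
+ /// a little-endian target, and the constructor call mirrors the generated
+ /// `new`):
+ ///
+ /// ```ignore
+ /// let unit = __BindgenBitfieldUnit::<[u8; 1]>::new([0b0000_0110]);
+ /// // Read 3 bits starting at bit offset 1: 0b011 == 3.
+ /// assert_eq!(unit.get_const::<1usize, 3u8>(), 3);
+ /// ```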
+ #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + 
self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
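+ ///
+ /// Round-trip sketch through a raw pointer (illustrative; the raw
+ /// variants exist so no `&mut` to the unit is ever created):
+ ///
+ /// ```ignore
+ /// let mut unit = __BindgenBitfieldUnit::<[u8; 2]>::new([0u8; 2]);
+ /// let p = core::ptr::addr_of_mut!(unit);
+ /// unsafe {
+ ///     __BindgenBitfieldUnit::<[u8; 2]>::raw_set_const::<4usize, 8u8>(p, 0xAB);
+ /// }
+ /// // Writing then reading the same field returns the value on any
+ /// // endianness.
+ /// assert_eq!(unit.get_const::<4usize, 8u8>(), 0xAB);
+ /// ```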
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } @@ -169,13 +512,15 @@ impl Default for C { impl C { #[inline] pub fn a(&self) -> bool { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u8) + } } #[inline] pub fn set_a(&mut self, val: bool) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -184,7 +529,10 @@ impl C { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) as u8, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -194,23 +542,23 @@ impl C { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b(&self) -> bool { - unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 7u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 7u8>() as u8) + } } #[inline] pub fn set_b(&mut self, val: bool) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(1usize, 7u8, val as u64) + self._bitfield_1.set_const::<1usize, 7u8>(val 
as u64) } } #[inline] @@ -219,7 +567,10 @@ impl C { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 7u8) as u8, + >>::raw_get_const::< + 1usize, + 7u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -229,35 +580,31 @@ impl C { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 7u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn new_bitfield_1(a: bool, b: bool) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let a: u8 = unsafe { ::std::mem::transmute(a) }; - a as u64 - }, - ); + >({ + let a: u8 = unsafe { ::std::mem::transmute(a) }; + a as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 7u8, - { - let b: u8 = unsafe { ::std::mem::transmute(b) }; - b as u64 - }, - ); + >({ + let b: u8 = unsafe { ::std::mem::transmute(b) }; + b as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/derive-partialeq-bitfield.rs b/bindgen-tests/tests/expectations/tests/derive-partialeq-bitfield.rs index b8da88e2a7..47d6808474 100644 --- a/bindgen-tests/tests/expectations/tests/derive-partialeq-bitfield.rs +++ b/bindgen-tests/tests/expectations/tests/derive-partialeq-bitfield.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if 
cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
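+ ///
+ /// Worked example of the little-endian read path (values illustrative):
+ /// with `BIT_OFFSET = 4` and `BIT_WIDTH = 8` over storage `[0xB0, 0x0A]`,
+ ///
+ /// ```text
+ /// bit_shift    = 4, bytes_needed = (8 + 4 + 7) / 8 = 2
+ /// gathered val = 0x0AB0          (byte 0 | byte 1 << 8)
+ /// val >> 4     = 0xAB
+ /// masked       = 0xAB & 0xFF = 0xAB
+ /// ```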
+ #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + 
self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } @@ -169,13 +512,15 @@ impl Default for C { impl C { #[inline] pub fn a(&self) -> bool { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u8) + } } #[inline] pub fn set_a(&mut self, val: bool) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -184,7 +529,10 @@ impl C { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) as u8, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -194,23 +542,23 @@ impl C { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b(&self) -> bool { - unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 7u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 7u8>() as u8) + } } #[inline] pub fn set_b(&mut self, val: bool) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(1usize, 7u8, val as u64) + self._bitfield_1.set_const::<1usize, 7u8>(val 
as u64) } } #[inline] @@ -219,7 +567,10 @@ impl C { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 7u8) as u8, + >>::raw_get_const::< + 1usize, + 7u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -229,35 +580,31 @@ impl C { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 7u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn new_bitfield_1(a: bool, b: bool) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let a: u8 = unsafe { ::std::mem::transmute(a) }; - a as u64 - }, - ); + >({ + let a: u8 = unsafe { ::std::mem::transmute(a) }; + a as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 7u8, - { - let b: u8 = unsafe { ::std::mem::transmute(b) }; - b as u64 - }, - ); + >({ + let b: u8 = unsafe { ::std::mem::transmute(b) }; + b as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/divide-by-zero-in-struct-layout.rs b/bindgen-tests/tests/expectations/tests/divide-by-zero-in-struct-layout.rs index 37139d3136..c1a2029373 100644 --- a/bindgen-tests/tests/expectations/tests/divide-by-zero-in-struct-layout.rs +++ b/bindgen-tests/tests/expectations/tests/divide-by-zero-in-struct-layout.rs @@ -75,16 +75,29 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -95,17 +108,32 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 
1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
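+ ///
+ /// A summary of the two branches below: when
+ /// `BIT_WIDTH as usize + BIT_OFFSET % 8 <= usize::BITS as usize`, all
+ /// touched bytes fit in one native word and the accumulation runs in a
+ /// `usize`; otherwise it falls back to `u64`. Both paths finish with
+ /// `val >>= bit_shift` and a `(1 << BIT_WIDTH) - 1` mask.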
+ #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | 
(byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
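+ ///
+ /// Worked example of the write-side masking (values illustrative): with
+ /// `BIT_OFFSET = 4`, `BIT_WIDTH = 8`, `val = 0xAB`,
+ ///
+ /// ```text
+ /// val << 4   = 0x0AB0
+ /// field_mask = ((1 << 8) - 1) << 4 = 0x0FF0
+ /// byte 0: (old & !0xF0) | (0xB0 & 0xF0)   -> low nibble preserved
+ /// byte 1: (old & !0x0F) | (0x0A & 0x0F)   -> high nibble preserved
+ /// ```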
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } diff --git a/bindgen-tests/tests/expectations/tests/field-visibility-callback.rs b/bindgen-tests/tests/expectations/tests/field-visibility-callback.rs index 99ca3d4b9b..795f6bec30 100644 --- a/bindgen-tests/tests/expectations/tests/field-visibility-callback.rs +++ b/bindgen-tests/tests/expectations/tests/field-visibility-callback.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( 
(bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { 
(*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
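+ ///
+ /// A minimal round-trip sketch (illustrative values):
+ ///
+ /// ```ignore
+ /// let mut unit = __BindgenBitfieldUnit::<[u8; 1]>::new([0u8; 1]);
+ /// unit.set_const::<1usize, 1u8>(1);
+ /// assert_eq!(unit.get_const::<1usize, 1u8>(), 1);
+ /// // Neighbouring bits are untouched thanks to the byte masking.
+ /// assert_eq!(unit.get_const::<0usize, 1u8>(), 0);
+ /// ```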
+ #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
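+ ///
+ /// Read through a `*const` pointer without taking a reference
+ /// (illustrative sketch; the expected value assumes a little-endian
+ /// target):
+ ///
+ /// ```ignore
+ /// let unit = __BindgenBitfieldUnit::<[u8; 1]>::new([0b0000_0001]);
+ /// let p = core::ptr::addr_of!(unit);
+ /// let v = unsafe {
+ ///     __BindgenBitfieldUnit::<[u8; 1]>::raw_get_const::<0usize, 1u8>(p)
+ /// };
+ /// assert_eq!(v, 1);
+ /// ```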
+ #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
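+ ///
+ /// With const parameters, `bytes_needed`, `field_mask`, and every shift
+ /// are compile-time constants, so the loop fully unrolls. As a hedged
+ /// sketch (actual codegen varies by target and optimization level), a
+ /// one-bit write at offset 0 in a one-byte unit reduces to roughly:
+ ///
+ /// ```ignore
+ /// *byte_ptr = (*byte_ptr & !0b1) | ((val as u8) & 0b1);
+ /// ```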
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } @@ -165,13 +508,15 @@ const _: () = { impl my_struct { #[inline] pub fn c(&self) -> ::std::os::raw::c_int { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u32) + } } #[inline] pub fn set_c(&mut self, val: ::std::os::raw::c_int) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -180,8 +525,10 @@ impl my_struct { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) - as u32, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -191,23 +538,23 @@ impl my_struct { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] fn private_d(&self) -> ::std::os::raw::c_int { - unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 1u8>() as u32) + } } #[inline] fn set_private_d(&mut self, val: ::std::os::raw::c_int) { unsafe { let val: u32 = 
::std::mem::transmute(val); - self._bitfield_1.set(1usize, 1u8, val as u64) + self._bitfield_1.set_const::<1usize, 1u8>(val as u64) } } #[inline] @@ -216,8 +563,10 @@ impl my_struct { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 1u8) - as u32, + >>::raw_get_const::< + 1usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -227,12 +576,10 @@ impl my_struct { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -242,23 +589,21 @@ impl my_struct { ) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let c: u32 = unsafe { ::std::mem::transmute(c) }; - c as u64 - }, - ); + >({ + let c: u32 = unsafe { ::std::mem::transmute(c) }; + c as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 1u8, - { - let private_d: u32 = unsafe { ::std::mem::transmute(private_d) }; - private_d as u64 - }, - ); + >({ + let private_d: u32 = unsafe { ::std::mem::transmute(private_d) }; + private_d as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/field-visibility.rs b/bindgen-tests/tests/expectations/tests/field-visibility.rs index 13a1d9a543..35645fe61a 100644 --- a/bindgen-tests/tests/expectations/tests/field-visibility.rs +++ b/bindgen-tests/tests/expectations/tests/field-visibility.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::<Self>(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe {
*storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::<Self>(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl<const N: usize> __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize.
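+ /// Sketch of a call, with assumed names: for a `unit: __BindgenBitfieldUnit<[u8; 2]>`, + /// `unit.get_const::<3usize, 5u8>()` returns the 5-bit field at bit offset 3 as a `u64`.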
+ #[inline] + pub const fn get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else {
self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize.
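+ /// Illustrative call (with an assumed pointer `p: *mut Self`): + /// `unsafe { Self::raw_set_const::<0usize, 1u8>(p, 1) }` sets a 1-bit field.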
+ #[inline] + pub unsafe fn raw_set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::<u8>(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } @@ -160,13 +503,15 @@ const _: () = { impl my_struct1 { #[inline] fn a(&self) -> ::std::os::raw::c_int { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u32) + } } #[inline] fn set_a(&mut self, val: ::std::os::raw::c_int) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -175,8 +520,10 @@ impl my_struct1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) - as u32, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -186,26 +533,23 @@ impl my_struct1 { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] fn new_bitfield_1(a: ::std::os::raw::c_int) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let a: u32 = unsafe { ::std::mem::transmute(a) }; - a as u64 - }, - ); + >({ + let a:
u32 = unsafe { ::std::mem::transmute(a) }; + a as u64 + }); __bindgen_bitfield_unit } } @@ -224,13 +568,15 @@ const _: () = { impl my_struct2 { #[inline] pub fn a(&self) -> ::std::os::raw::c_int { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u32) + } } #[inline] pub fn set_a(&mut self, val: ::std::os::raw::c_int) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -239,8 +585,10 @@ impl my_struct2 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) - as u32, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -250,12 +598,10 @@ impl my_struct2 { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -264,14 +610,13 @@ impl my_struct2 { ) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let a: u32 = unsafe { ::std::mem::transmute(a) }; - a as u64 - }, - ); + >({ + let a: u32 = unsafe { ::std::mem::transmute(a) }; + a as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/incomplete-array-padding.rs b/bindgen-tests/tests/expectations/tests/incomplete-array-padding.rs index a90fe54bf3..7e61272b36 100644 --- a/bindgen-tests/tests/expectations/tests/incomplete-array-padding.rs +++ b/bindgen-tests/tests/expectations/tests/incomplete-array-padding.rs @@ -75,16 +75,29 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -95,17 +108,32 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::<Self>(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if
cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::<Self>(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl<const N: usize> __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize.
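+ /// Example shape of a call (names assumed, not generated): + /// `unit.get_const::<0usize, 1u8>()` reads a 1-bit field; only the low + /// `BIT_WIDTH` bits of the returned `u64` can be set.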
+ #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | 
(byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize.
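+ /// Hypothetical use (with an assumed `p: *mut Self`): + /// `unsafe { Self::raw_set_const::<3usize, 5u8>(p, val) }` stores the low + /// 5 bits of `val` into the field at bit offset 3.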
+ #[inline] + pub unsafe fn raw_set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::<u8>(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift } else { - i + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } @@ -199,13 +542,15 @@ impl Default for foo { impl foo { #[inline] pub fn a(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u8) + } } #[inline] pub fn set_a(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -214,7 +559,10 @@ impl foo { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) as u8, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -224,12 +572,10 @@ impl foo { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -238,14 +584,13 @@ impl foo { ) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let a: u8 = unsafe {
::std::mem::transmute(a) }; - a as u64 - }, - ); + >({ + let a: u8 = unsafe { ::std::mem::transmute(a) }; + a as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/issue-1034.rs b/bindgen-tests/tests/expectations/tests/issue-1034.rs index 90cc768a94..2d8b7e23d8 100644 --- a/bindgen-tests/tests/expectations/tests/issue-1034.rs +++ b/bindgen-tests/tests/expectations/tests/issue-1034.rs @@ -75,16 +75,29 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -95,17 +108,32 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::<Self>(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte +
i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::<Self>(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl<const N: usize> __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const fn get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization.
+ /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize.
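+ /// Illustrative call (assumed `p: *const Self`): + /// `unsafe { Self::raw_get_const::<3usize, 5u8>(p) }` reads the field through + /// the raw pointer without constructing a reference.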
+ #[inline] + pub const unsafe fn raw_get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize.
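+ /// Sketch (assumed `p: *mut Self`): `unsafe { Self::raw_set_const::<3usize, 5u8>(p, 0b10110) }`.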
+ #[inline] + pub unsafe fn raw_set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::<u8>(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } diff --git a/bindgen-tests/tests/expectations/tests/issue-1076-unnamed-bitfield-alignment.rs b/bindgen-tests/tests/expectations/tests/issue-1076-unnamed-bitfield-alignment.rs index 50e9283b5a..505b29b655 100644 --- a/bindgen-tests/tests/expectations/tests/issue-1076-unnamed-bitfield-alignment.rs +++ b/bindgen-tests/tests/expectations/tests/issue-1076-unnamed-bitfield-alignment.rs @@ -75,16 +75,29 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@
-95,17 +108,32 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::<Self>(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::<Self>(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian =
"big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub fn set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize.
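+ /// Hypothetical use: `let v = unsafe { Self::raw_get_const::<0usize, 1u8>(p) };` + /// with an assumed `p: *const Self`.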
+ #[inline] + pub const unsafe fn raw_get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize.
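+ /// Sketch with assumed names: `unsafe { Self::raw_set_const::<0usize, 1u8>(p, 1) }` + /// for some `p: *mut Self`.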
+ #[inline] + pub unsafe fn raw_set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::<u8>(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } diff --git a/bindgen-tests/tests/expectations/tests/issue-1947.rs b/bindgen-tests/tests/expectations/tests/issue-1947.rs index 795b033a12..cc3763057a 100644 --- a/bindgen-tests/tests/expectations/tests/issue-1947.rs +++ b/bindgen-tests/tests/expectations/tests/issue-1947.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <=
core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & 
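// Illustrative sketch (not part of the generated expectation files): the
// rewritten `set` merges the new field into storage one byte at a time with a
// precomputed mask, rather than flipping individual bits. The sketch assumes
// the invariant the generated units satisfy, bit_width + bit_shift <= 64, so
// the shifted value still fits in a u64. Names are invented for the demo.
fn insert_le(storage: &mut [u8], bit_offset: usize, bit_width: u8, mut val: u64) {
    if bit_width == 0 {
        return;
    }
    if bit_width < 64 {
        val &= (1u64 << bit_width) - 1; // ignore bits beyond the field width
    }
    let start_byte = bit_offset / 8;
    let bit_shift = bit_offset % 8;
    let bytes_needed = (bit_width as usize + bit_shift + 7) / 8;
    val <<= bit_shift;
    // All-ones over the field's bit positions, relative to start_byte * 8.
    let field_mask = if bit_width as usize + bit_shift >= 64 {
        !0u64 << bit_shift
    } else {
        ((1u64 << bit_width) - 1) << bit_shift
    };
    for i in 0..bytes_needed {
        let byte_val = (val >> (i * 8)) as u8;
        let byte_mask = (field_mask >> (i * 8)) as u8;
        // Read-modify-write: bits outside the field keep their old value.
        storage[start_byte + i] =
            (storage[start_byte + i] & !byte_mask) | (byte_val & byte_mask);
    }
}

fn main() {
    let mut storage = [0xFFu8; 3];
    // A 10-bit field at bit offset 3 spans bits 3..13 (bytes 0 and 1).
    insert_le(&mut storage, 3, 10, 0);
    assert_eq!(storage, [0b0000_0111, 0b1110_0000, 0xFF]);
}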
!byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
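// Illustrative sketch (not part of the generated expectation files): on
// big-endian targets the generated code byte-reverses with `reverse_bits` so
// that the same little-endian gather/shift/mask algorithm applies, then
// reverses the final width-bit value back into place. This demo hard-codes
// the big-endian bit numbering (MSB first within each byte) so it runs on any
// host; the function names are invented.
fn get_be_reference(storage: &[u8], bit_offset: usize, bit_width: u8) -> u64 {
    // Bit-at-a-time, MSB-first: the field's first bit becomes the value's MSB.
    let mut val = 0u64;
    for i in 0..bit_width as usize {
        let bit = bit_offset + i;
        if storage[bit / 8] & (0x80u8 >> (bit % 8)) != 0 {
            val |= 1u64 << (bit_width as usize - 1 - i);
        }
    }
    val
}

fn get_be_bytewise(storage: &[u8], bit_offset: usize, bit_width: u8) -> u64 {
    if bit_width == 0 {
        return 0;
    }
    let start_byte = bit_offset / 8;
    let bit_shift = bit_offset % 8;
    let bytes_needed = (bit_width as usize + bit_shift + 7) / 8;
    let mut val = 0u64;
    for i in 0..bytes_needed {
        // Reversing each byte turns MSB-first numbering into LSB-first.
        val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8);
    }
    val >>= bit_shift;
    if bit_width < 64 {
        val &= (1u64 << bit_width) - 1;
    }
    // The gathered value is bit-reversed; flip it back into a normal integer.
    val.reverse_bits() >> (64 - bit_width as u32)
}

fn main() {
    let storage = [0b1011_0010u8, 0b0100_1101];
    for offset in 0..8 {
        for width in 1..=8u8 {
            assert_eq!(
                get_be_bytewise(&storage, offset, width),
                get_be_reference(&storage, offset, width)
            );
        }
    }
}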
+ #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift } else { - i + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
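// Illustrative sketch (not part of the generated expectation files):
// `get_const` is a `const fn`, and `for` loops are not allowed in const
// contexts, which is why the generated bodies iterate with `while`. A
// trimmed-down, little-endian-only version shows the shape and that field
// reads can fold to constants; the `Unit` type and method names are invented.
struct Unit<const N: usize> {
    storage: [u8; N],
}

impl<const N: usize> Unit<N> {
    const fn get<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&self) -> u64 {
        let start_byte = BIT_OFFSET / 8;
        let bit_shift = BIT_OFFSET % 8;
        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
        let mut val = 0u64;
        let mut i = 0;
        while i < bytes_needed {
            val |= (self.storage[start_byte + i] as u64) << (i * 8);
            i += 1;
        }
        val >>= bit_shift;
        if BIT_WIDTH < 64 {
            val &= (1u64 << BIT_WIDTH) - 1;
        }
        val
    }
}

// The accessor is usable in const context, so offset, width, and even the
// result can be evaluated at compile time.
const UNIT: Unit<2> = Unit { storage: [0xCD, 0xAB] };
const FIELD: u64 = UNIT.get::<4, 8>();

fn main() {
    assert_eq!(FIELD, 0xBC);
}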
+ #[inline] + pub const unsafe fn raw_get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize.
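// Illustrative sketch (not part of the generated expectation files): the raw
// accessors take `*const Self` / `*mut Self` and reach the bytes through
// `core::ptr::addr_of!`, which computes a field pointer without creating an
// intermediate `&` reference, so they remain usable on pointers where
// materializing a reference would be questionable, e.g. storage that is not
// fully initialized yet. Minimal demo with an invented type:
use core::mem::MaybeUninit;
use core::ptr;

#[repr(C)]
struct Unit {
    storage: [u8; 2],
}

unsafe fn raw_read_byte(this: *const Unit, i: usize) -> u8 {
    let storage_ptr = ptr::addr_of!((*this).storage) as *const u8;
    *storage_ptr.add(i)
}

fn main() {
    let mut slot = MaybeUninit::<Unit>::uninit();
    // Initialize through the raw pointer only; no `&Unit` ever exists here.
    let p = slot.as_mut_ptr();
    unsafe {
        ptr::addr_of_mut!((*p).storage).write([0x12, 0x34]);
        assert_eq!(raw_read_byte(p, 1), 0x34);
    }
}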
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } @@ -168,13 +511,15 @@ const _: () = { impl V56AMDY { #[inline] pub fn MADZ(&self) -> U16 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 10u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 10u8>() as u16) + } } #[inline] pub fn set_MADZ(&mut self, val: U16) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 10u8, val as u64) + self._bitfield_1.set_const::<0usize, 10u8>(val as u64) } } #[inline] @@ -183,8 +528,10 @@ impl V56AMDY { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 10u8) - as u16, + >>::raw_get_const::< + 0usize, + 10u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -194,23 +541,23 @@ impl V56AMDY { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 10u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn MAI0(&self) -> U16 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(10usize, 2u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<10usize, 2u8>() as u16) + } } #[inline] pub fn set_MAI0(&mut self, val: U16) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(10usize, 2u8, val as u64) + 
self._bitfield_1.set_const::<10usize, 2u8>(val as u64) } } #[inline] @@ -219,8 +566,10 @@ impl V56AMDY { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 10usize, 2u8) - as u16, + >>::raw_get_const::< + 10usize, + 2u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -230,23 +579,23 @@ impl V56AMDY { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 10usize, 2u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn MAI1(&self) -> U16 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(12usize, 2u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<12usize, 2u8>() as u16) + } } #[inline] pub fn set_MAI1(&mut self, val: U16) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(12usize, 2u8, val as u64) + self._bitfield_1.set_const::<12usize, 2u8>(val as u64) } } #[inline] @@ -255,8 +604,10 @@ impl V56AMDY { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 12usize, 2u8) - as u16, + >>::raw_get_const::< + 12usize, + 2u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -266,23 +617,23 @@ impl V56AMDY { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 12usize, 2u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn MAI2(&self) -> U16 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(14usize, 2u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<14usize, 2u8>() as u16) + } } #[inline] pub fn set_MAI2(&mut self, val: U16) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(14usize, 2u8, val as u64) + self._bitfield_1.set_const::<14usize, 2u8>(val as u64) } } #[inline] @@ -291,8 +642,10 @@ impl V56AMDY { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 14usize, 2u8) - as u16, + >>::raw_get_const::< + 14usize, + 2u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -302,12 +655,10 @@ impl V56AMDY { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 14usize, 2u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -319,52 +670,50 @@ impl V56AMDY { ) -> __BindgenBitfieldUnit<[u8; 2usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 2usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 10u8, - { - let MADZ: u16 = unsafe { ::std::mem::transmute(MADZ) }; - MADZ as u64 - }, - ); + >({ + let MADZ: u16 = unsafe { ::std::mem::transmute(MADZ) }; + MADZ as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 10usize, 2u8, - { - let MAI0: u16 = unsafe { ::std::mem::transmute(MAI0) }; - MAI0 as u64 - }, - ); + >({ + let MAI0: u16 = unsafe { ::std::mem::transmute(MAI0) }; + MAI0 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 12usize, 2u8, - { - let MAI1: u16 = unsafe { ::std::mem::transmute(MAI1) }; - MAI1 as u64 - }, - ); + >({ + let MAI1: u16 = unsafe { ::std::mem::transmute(MAI1) }; + MAI1 as u64 
+ }); __bindgen_bitfield_unit - .set( + .set_const::< 14usize, 2u8, - { - let MAI2: u16 = unsafe { ::std::mem::transmute(MAI2) }; - MAI2 as u64 - }, - ); + >({ + let MAI2: u16 = unsafe { ::std::mem::transmute(MAI2) }; + MAI2 as u64 + }); __bindgen_bitfield_unit } #[inline] pub fn MATH(&self) -> U16 { - unsafe { ::std::mem::transmute(self._bitfield_2.get(0usize, 10u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_2.get_const::<0usize, 10u8>() as u16) + } } #[inline] pub fn set_MATH(&mut self, val: U16) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_2.set(0usize, 10u8, val as u64) + self._bitfield_2.set_const::<0usize, 10u8>(val as u64) } } #[inline] @@ -373,8 +722,10 @@ impl V56AMDY { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_2), 0usize, 10u8) - as u16, + >>::raw_get_const::< + 0usize, + 10u8, + >(::std::ptr::addr_of!((*this)._bitfield_2)) as u16, ) } } @@ -384,23 +735,23 @@ impl V56AMDY { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_2), + >>::raw_set_const::< 0usize, 10u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_2), val as u64) } } #[inline] pub fn MATE(&self) -> U16 { - unsafe { ::std::mem::transmute(self._bitfield_2.get(10usize, 4u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_2.get_const::<10usize, 4u8>() as u16) + } } #[inline] pub fn set_MATE(&mut self, val: U16) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_2.set(10usize, 4u8, val as u64) + self._bitfield_2.set_const::<10usize, 4u8>(val as u64) } } #[inline] @@ -409,8 +760,10 @@ impl V56AMDY { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_2), 10usize, 4u8) - as u16, + >>::raw_get_const::< + 10usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_2)) as u16, ) } } @@ -420,23 +773,23 @@ impl V56AMDY { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_2), + >>::raw_set_const::< 10usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_2), val as u64) } } #[inline] pub fn MATW(&self) -> U16 { - unsafe { ::std::mem::transmute(self._bitfield_2.get(14usize, 2u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_2.get_const::<14usize, 2u8>() as u16) + } } #[inline] pub fn set_MATW(&mut self, val: U16) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_2.set(14usize, 2u8, val as u64) + self._bitfield_2.set_const::<14usize, 2u8>(val as u64) } } #[inline] @@ -445,8 +798,10 @@ impl V56AMDY { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_2), 14usize, 2u8) - as u16, + >>::raw_get_const::< + 14usize, + 2u8, + >(::std::ptr::addr_of!((*this)._bitfield_2)) as u16, ) } } @@ -456,23 +811,23 @@ impl V56AMDY { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_2), + >>::raw_set_const::< 14usize, 2u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_2), val as u64) } } #[inline] pub fn MASW(&self) -> U8 { - unsafe { ::std::mem::transmute(self._bitfield_2.get(16usize, 4u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_2.get_const::<16usize, 4u8>() as u8) + } } #[inline] pub fn set_MASW(&mut self, 
val: U8) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_2.set(16usize, 4u8, val as u64) + self._bitfield_2.set_const::<16usize, 4u8>(val as u64) } } #[inline] @@ -481,8 +836,10 @@ impl V56AMDY { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_2), 16usize, 4u8) - as u8, + >>::raw_get_const::< + 16usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_2)) as u8, ) } } @@ -492,23 +849,23 @@ impl V56AMDY { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_2), + >>::raw_set_const::< 16usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_2), val as u64) } } #[inline] pub fn MABW(&self) -> U8 { - unsafe { ::std::mem::transmute(self._bitfield_2.get(20usize, 3u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_2.get_const::<20usize, 3u8>() as u8) + } } #[inline] pub fn set_MABW(&mut self, val: U8) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_2.set(20usize, 3u8, val as u64) + self._bitfield_2.set_const::<20usize, 3u8>(val as u64) } } #[inline] @@ -517,8 +874,10 @@ impl V56AMDY { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_2), 20usize, 3u8) - as u8, + >>::raw_get_const::< + 20usize, + 3u8, + >(::std::ptr::addr_of!((*this)._bitfield_2)) as u8, ) } } @@ -528,23 +887,23 @@ impl V56AMDY { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_2), + >>::raw_set_const::< 20usize, 3u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_2), val as u64) } } #[inline] pub fn MAXN(&self) -> U8 { - unsafe { ::std::mem::transmute(self._bitfield_2.get(23usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_2.get_const::<23usize, 1u8>() as u8) + } } #[inline] pub fn set_MAXN(&mut self, val: U8) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_2.set(23usize, 1u8, val as u64) + self._bitfield_2.set_const::<23usize, 1u8>(val as u64) } } #[inline] @@ -553,8 +912,10 @@ impl V56AMDY { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_2), 23usize, 1u8) - as u8, + >>::raw_get_const::< + 23usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_2)) as u8, ) } } @@ -564,12 +925,10 @@ impl V56AMDY { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_2), + >>::raw_set_const::< 23usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_2), val as u64) } } #[inline] @@ -583,59 +942,53 @@ impl V56AMDY { ) -> __BindgenBitfieldUnit<[u8; 3usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 3usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 10u8, - { - let MATH: u16 = unsafe { ::std::mem::transmute(MATH) }; - MATH as u64 - }, - ); + >({ + let MATH: u16 = unsafe { ::std::mem::transmute(MATH) }; + MATH as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 10usize, 4u8, - { - let MATE: u16 = unsafe { ::std::mem::transmute(MATE) }; - MATE as u64 - }, - ); + >({ + let MATE: u16 = unsafe { ::std::mem::transmute(MATE) }; + MATE as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 14usize, 2u8, - { - let MATW: u16 = unsafe { ::std::mem::transmute(MATW) }; - MATW 
as u64 - }, - ); + >({ + let MATW: u16 = unsafe { ::std::mem::transmute(MATW) }; + MATW as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 16usize, 4u8, - { - let MASW: u8 = unsafe { ::std::mem::transmute(MASW) }; - MASW as u64 - }, - ); + >({ + let MASW: u8 = unsafe { ::std::mem::transmute(MASW) }; + MASW as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 20usize, 3u8, - { - let MABW: u8 = unsafe { ::std::mem::transmute(MABW) }; - MABW as u64 - }, - ); + >({ + let MABW: u8 = unsafe { ::std::mem::transmute(MABW) }; + MABW as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 23usize, 1u8, - { - let MAXN: u8 = unsafe { ::std::mem::transmute(MAXN) }; - MAXN as u64 - }, - ); + >({ + let MAXN: u8 = unsafe { ::std::mem::transmute(MAXN) }; + MAXN as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/issue-739-pointer-wide-bitfield.rs b/bindgen-tests/tests/expectations/tests/issue-739-pointer-wide-bitfield.rs index bc1951e7d1..21d673e278 100644 --- a/bindgen-tests/tests/expectations/tests/issue-739-pointer-wide-bitfield.rs +++ b/bindgen-tests/tests/expectations/tests/issue-739-pointer-wide-bitfield.rs @@ -76,17 +76,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -96,17 +109,32 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -116,15 +144,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - 
bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -134,15 +184,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
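// Illustrative sketch (not part of the generated expectation files):
// `bytes_needed` is a ceiling division. A field of `bit_width` bits starting
// `bit_shift` bits into its first byte touches ceil((bit_width + bit_shift) / 8)
// bytes, and `(n + 7) / 8` is the integer form of that ceiling. The helper
// name is invented:
fn bytes_touched(bit_width: u8, bit_offset: usize) -> usize {
    let bit_shift = bit_offset % 8;
    (bit_width as usize + bit_shift + 7) / 8
}

fn main() {
    assert_eq!(bytes_touched(1, 0), 1); // one bit, byte aligned
    assert_eq!(bytes_touched(8, 0), 1); // exactly one byte
    assert_eq!(bytes_touched(8, 7), 2); // eight bits straddling two bytes
    assert_eq!(bytes_touched(10, 14), 2); // bits 14..24 live in bytes 1 and 2
    assert_eq!(bytes_touched(64, 4), 9); // a shifted 64-bit field touches 9 bytes
}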
+ #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } } else { - i + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | 
(byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } @@ -160,13 +503,15 @@ const _: () = { impl Foo { #[inline] pub fn m_bitfield(&self) -> ::std::os::raw::c_ulong { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 64u8) as u64) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 64u8>() as u64) + } } #[inline] pub fn set_m_bitfield(&mut self, val: ::std::os::raw::c_ulong) { unsafe { let val: u64 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 64u8, val as u64) + self._bitfield_1.set_const::<0usize, 64u8>(val as u64) } } #[inline] @@ -175,8 +520,10 @@ impl Foo { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 32usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 64u8) - as u64, + >>::raw_get_const::< + 0usize, + 64u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u64, ) } } @@ -186,23 +533,23 @@ impl Foo { let val: u64 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 32usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 64u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m_bar(&self) -> ::std::os::raw::c_ulong { - unsafe { ::std::mem::transmute(self._bitfield_1.get(64usize, 64u8) as u64) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<64usize, 64u8>() as u64) + } } #[inline] pub fn 
set_m_bar(&mut self, val: ::std::os::raw::c_ulong) { unsafe { let val: u64 = ::std::mem::transmute(val); - self._bitfield_1.set(64usize, 64u8, val as u64) + self._bitfield_1.set_const::<64usize, 64u8>(val as u64) } } #[inline] @@ -211,8 +558,10 @@ impl Foo { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 32usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 64usize, 64u8) - as u64, + >>::raw_get_const::< + 64usize, + 64u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u64, ) } } @@ -222,23 +571,23 @@ impl Foo { let val: u64 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 32usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 64usize, 64u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn foo(&self) -> ::std::os::raw::c_ulong { - unsafe { ::std::mem::transmute(self._bitfield_1.get(128usize, 1u8) as u64) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<128usize, 1u8>() as u64) + } } #[inline] pub fn set_foo(&mut self, val: ::std::os::raw::c_ulong) { unsafe { let val: u64 = ::std::mem::transmute(val); - self._bitfield_1.set(128usize, 1u8, val as u64) + self._bitfield_1.set_const::<128usize, 1u8>(val as u64) } } #[inline] @@ -247,8 +596,10 @@ impl Foo { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 32usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 128usize, 1u8) - as u64, + >>::raw_get_const::< + 128usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u64, ) } } @@ -258,23 +609,23 @@ impl Foo { let val: u64 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 32usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 128usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bar(&self) -> ::std::os::raw::c_ulong { - unsafe { ::std::mem::transmute(self._bitfield_1.get(192usize, 64u8) as u64) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<192usize, 64u8>() as u64) + } } #[inline] pub fn set_bar(&mut self, val: ::std::os::raw::c_ulong) { unsafe { let val: u64 = ::std::mem::transmute(val); - self._bitfield_1.set(192usize, 64u8, val as u64) + self._bitfield_1.set_const::<192usize, 64u8>(val as u64) } } #[inline] @@ -283,8 +634,10 @@ impl Foo { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 32usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 192usize, 64u8) - as u64, + >>::raw_get_const::< + 192usize, + 64u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u64, ) } } @@ -294,12 +647,10 @@ impl Foo { let val: u64 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 32usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 192usize, 64u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -311,41 +662,37 @@ impl Foo { ) -> __BindgenBitfieldUnit<[u8; 32usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 32usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 64u8, - { - let m_bitfield: u64 = unsafe { ::std::mem::transmute(m_bitfield) }; - m_bitfield as u64 - }, - ); + >({ + let m_bitfield: u64 = unsafe { ::std::mem::transmute(m_bitfield) }; + m_bitfield as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 64usize, 64u8, - { - let m_bar: u64 = unsafe { ::std::mem::transmute(m_bar) }; - m_bar as u64 - }, - ); + >({ + let m_bar: u64 = unsafe 
{ ::std::mem::transmute(m_bar) }; + m_bar as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 128usize, 1u8, - { - let foo: u64 = unsafe { ::std::mem::transmute(foo) }; - foo as u64 - }, - ); + >({ + let foo: u64 = unsafe { ::std::mem::transmute(foo) }; + foo as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 192usize, 64u8, - { - let bar: u64 = unsafe { ::std::mem::transmute(bar) }; - bar as u64 - }, - ); + >({ + let bar: u64 = unsafe { ::std::mem::transmute(bar) }; + bar as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/issue-743.rs b/bindgen-tests/tests/expectations/tests/issue-743.rs index af3eb5bf6e..aa69c7ea9a 100644 --- a/bindgen-tests/tests/expectations/tests/issue-743.rs +++ b/bindgen-tests/tests/expectations/tests/issue-743.rs @@ -75,16 +75,29 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -95,17 +108,32 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width 
as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
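// Illustrative sketch (not part of the generated expectation files): the
// two-armed `field_mask` exists because the mask is built in u64, and once
// bit_width + bit_shift reaches 64 it must cover every bit from bit_shift
// upward, while `1u64 << 64` itself would overflow. The helper name is
// invented:
fn field_mask(bit_width: u8, bit_shift: usize) -> u64 {
    if bit_width as usize + bit_shift >= 64 {
        !0u64 << bit_shift
    } else {
        ((1u64 << bit_width) - 1) << bit_shift
    }
}

fn main() {
    assert_eq!(field_mask(4, 2), 0b0011_1100);
    // 60 bits shifted by 4 fill the top 60 bits exactly.
    assert_eq!(field_mask(60, 4), !0u64 << 4);
    // A full 64-bit field needs every bit; (1u64 << 64) - 1 would panic.
    assert_eq!(field_mask(64, 0), u64::MAX);
}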
+ #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | 
(byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift } else { - i + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } @@ -172,13 +515,15 @@ impl Default for S { impl S { #[inline] pub fn u(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 16u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 16u8>() as u32) + } } #[inline] pub fn set_u(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 16u8, val as u64) + self._bitfield_1.set_const::<0usize, 16u8>(val as u64) } } #[inline] @@ -187,8 +532,10 @@ impl S { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 16u8) - as u32, + >>::raw_get_const::< + 0usize, + 16u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -198,12 +545,10 @@ impl S { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 16u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -212,14 +557,13 @@ impl S { ) -> __BindgenBitfieldUnit<[u8; 2usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 2usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 16u8, - { - let u: u32 = unsafe { 
::std::mem::transmute(u) }; - u as u64 - }, - ); + >({ + let u: u32 = unsafe { ::std::mem::transmute(u) }; + u as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/issue-816.rs b/bindgen-tests/tests/expectations/tests/issue-816.rs index b1494afede..a874606da7 100644 --- a/bindgen-tests/tests/expectations/tests/issue-816.rs +++ b/bindgen-tests/tests/expectations/tests/issue-816.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + 
i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::<Self>(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl<const N: usize> __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const fn get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization.
+ /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize.
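// A minimal standalone sketch of the read-modify-write that set_const performs
// on little-endian targets, including the guard it uses against the `1u64 << 64`
// overflow when a shifted field reaches 64 bits. `insert_le` and the sample
// values are illustrative, not bindgen API.
fn insert_le(storage: &mut [u8], bit_offset: usize, bit_width: u8, mut val: u64) {
    if bit_width == 0 {
        return;
    }
    if bit_width < 64 {
        val &= (1u64 << bit_width) - 1; // clamp the value to the field width
    }
    let start_byte = bit_offset / 8;
    let bit_shift = bit_offset % 8;
    let bytes_needed = (bit_width as usize + bit_shift + 7) / 8;
    val <<= bit_shift;
    // Separate arm for wide, shifted fields: `(1u64 << bit_width) - 1` would
    // overflow once bit_width + bit_shift reaches 64.
    let field_mask = if bit_width as usize + bit_shift >= 64 {
        !0u64 << bit_shift
    } else {
        ((1u64 << bit_width) - 1) << bit_shift
    };
    for i in 0..bytes_needed {
        let byte_val = (val >> (i * 8)) as u8;
        let byte_mask = (field_mask >> (i * 8)) as u8;
        // only the field's own bits change; neighbouring fields are preserved
        storage[start_byte + i] = (storage[start_byte + i] & !byte_mask)
            | (byte_val & byte_mask);
    }
}

fn main() {
    let mut storage = [0xFFu8; 2];
    insert_le(&mut storage, 6, 5, 0b01010);
    // Bits 6..=10 now hold 0b01010; all surrounding bits are still set.
    assert_eq!(storage, [0b1011_1111, 0b1111_1010]);
}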
+ #[inline] + pub const unsafe fn raw_get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } } else { - i + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize.
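// A standalone sketch of why the big-endian arms bit-reverse each byte: after
// reversal the field can be assembled and shifted exactly like the
// little-endian case, and one final `reverse_bits` restores bit order. The
// names `extract_be`/`extract_msb_first` are illustrative assumptions; the
// loop below checks the two formulations agree.
fn extract_be(storage: &[u8], bit_offset: usize, bit_width: u8) -> u64 {
    assert!((1..=64).contains(&bit_width)); // width 0 is early-returned upstream
    let start_byte = bit_offset / 8;
    let bit_shift = bit_offset % 8;
    let bytes_needed = (bit_width as usize + bit_shift + 7) / 8;
    let mut val = 0u64;
    for i in 0..bytes_needed {
        val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8);
    }
    val >>= bit_shift;
    if bit_width < 64 {
        val &= (1u64 << bit_width) - 1;
    }
    val.reverse_bits() >> (64 - bit_width as u32)
}

// Reference: read `bit_width` bits starting `bit_offset` bits in, MSB-first,
// matching how big-endian targets allocate bitfields.
fn extract_msb_first(storage: &[u8], bit_offset: usize, bit_width: u8) -> u64 {
    let mut val = 0u64;
    for i in 0..bit_width as usize {
        let bit = bit_offset + i;
        val = (val << 1) | ((storage[bit / 8] >> (7 - bit % 8)) & 1) as u64;
    }
    val
}

fn main() {
    let storage = [0xA5u8, 0x3C, 0x7E];
    for offset in 0..12 {
        for width in 1..=8u8 {
            assert_eq!(
                extract_be(&storage, offset, width),
                extract_msb_first(&storage, offset, width),
            );
        }
    }
}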
+ #[inline] + pub unsafe fn raw_set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::<u8>(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift } else { - i + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } @@ -159,13 +502,15 @@ const _: () = { impl capabilities { #[inline] pub fn bit_1(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_1(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -174,8 +519,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) - as u32, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -185,23 +532,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_2(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 1u8>() as u32) + } } #[inline] pub
fn set_bit_2(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(1usize, 1u8, val as u64) + self._bitfield_1.set_const::<1usize, 1u8>(val as u64) } } #[inline] @@ -210,8 +557,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 1u8) - as u32, + >>::raw_get_const::< + 1usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -221,23 +570,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_3(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<2usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_3(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(2usize, 1u8, val as u64) + self._bitfield_1.set_const::<2usize, 1u8>(val as u64) } } #[inline] @@ -246,8 +595,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 2usize, 1u8) - as u32, + >>::raw_get_const::< + 2usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -257,23 +608,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 2usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_4(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(3usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<3usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_4(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(3usize, 1u8, val as u64) + self._bitfield_1.set_const::<3usize, 1u8>(val as u64) } } #[inline] @@ -282,8 +633,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 3usize, 1u8) - as u32, + >>::raw_get_const::< + 3usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -293,23 +646,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 3usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_5(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(4usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<4usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_5(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(4usize, 1u8, val as u64) + self._bitfield_1.set_const::<4usize, 1u8>(val as u64) } } #[inline] @@ -318,8 +671,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - 
>>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 4usize, 1u8) - as u32, + >>::raw_get_const::< + 4usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -329,23 +684,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 4usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_6(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(5usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<5usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_6(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(5usize, 1u8, val as u64) + self._bitfield_1.set_const::<5usize, 1u8>(val as u64) } } #[inline] @@ -354,8 +709,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 5usize, 1u8) - as u32, + >>::raw_get_const::< + 5usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -365,23 +722,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 5usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_7(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(6usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<6usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_7(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(6usize, 1u8, val as u64) + self._bitfield_1.set_const::<6usize, 1u8>(val as u64) } } #[inline] @@ -390,8 +747,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 6usize, 1u8) - as u32, + >>::raw_get_const::< + 6usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -401,23 +760,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 6usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_8(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(7usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<7usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_8(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(7usize, 1u8, val as u64) + self._bitfield_1.set_const::<7usize, 1u8>(val as u64) } } #[inline] @@ -426,8 +785,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 7usize, 1u8) - as u32, + >>::raw_get_const::< + 7usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -437,23 +798,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + 
>>::raw_set_const::< 7usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_9(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(8usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<8usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_9(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(8usize, 1u8, val as u64) + self._bitfield_1.set_const::<8usize, 1u8>(val as u64) } } #[inline] @@ -462,8 +823,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 8usize, 1u8) - as u32, + >>::raw_get_const::< + 8usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -473,23 +836,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 8usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_10(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(9usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<9usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_10(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(9usize, 1u8, val as u64) + self._bitfield_1.set_const::<9usize, 1u8>(val as u64) } } #[inline] @@ -498,8 +861,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 9usize, 1u8) - as u32, + >>::raw_get_const::< + 9usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -509,23 +874,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 9usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_11(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(10usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<10usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_11(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(10usize, 1u8, val as u64) + self._bitfield_1.set_const::<10usize, 1u8>(val as u64) } } #[inline] @@ -534,8 +899,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 10usize, 1u8) - as u32, + >>::raw_get_const::< + 10usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -545,23 +912,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 10usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_12(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(11usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<11usize, 1u8>() as u32) + } } #[inline] pub 
fn set_bit_12(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(11usize, 1u8, val as u64) + self._bitfield_1.set_const::<11usize, 1u8>(val as u64) } } #[inline] @@ -570,8 +937,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 11usize, 1u8) - as u32, + >>::raw_get_const::< + 11usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -581,23 +950,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 11usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_13(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(12usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<12usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_13(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(12usize, 1u8, val as u64) + self._bitfield_1.set_const::<12usize, 1u8>(val as u64) } } #[inline] @@ -606,8 +975,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 12usize, 1u8) - as u32, + >>::raw_get_const::< + 12usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -617,23 +988,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 12usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_14(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(13usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<13usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_14(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(13usize, 1u8, val as u64) + self._bitfield_1.set_const::<13usize, 1u8>(val as u64) } } #[inline] @@ -642,8 +1013,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 13usize, 1u8) - as u32, + >>::raw_get_const::< + 13usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -653,23 +1026,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 13usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_15(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(14usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<14usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_15(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(14usize, 1u8, val as u64) + self._bitfield_1.set_const::<14usize, 1u8>(val as u64) } } #[inline] @@ -678,8 +1051,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - 
>>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 14usize, 1u8) - as u32, + >>::raw_get_const::< + 14usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -689,23 +1064,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 14usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_16(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(15usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<15usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_16(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(15usize, 1u8, val as u64) + self._bitfield_1.set_const::<15usize, 1u8>(val as u64) } } #[inline] @@ -714,8 +1089,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 15usize, 1u8) - as u32, + >>::raw_get_const::< + 15usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -725,23 +1102,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 15usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_17(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(16usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<16usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_17(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(16usize, 1u8, val as u64) + self._bitfield_1.set_const::<16usize, 1u8>(val as u64) } } #[inline] @@ -750,8 +1127,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 16usize, 1u8) - as u32, + >>::raw_get_const::< + 16usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -761,23 +1140,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 16usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_18(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(17usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<17usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_18(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(17usize, 1u8, val as u64) + self._bitfield_1.set_const::<17usize, 1u8>(val as u64) } } #[inline] @@ -786,8 +1165,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 17usize, 1u8) - as u32, + >>::raw_get_const::< + 17usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -797,23 +1178,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - 
::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 17usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_19(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(18usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<18usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_19(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(18usize, 1u8, val as u64) + self._bitfield_1.set_const::<18usize, 1u8>(val as u64) } } #[inline] @@ -822,8 +1203,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 18usize, 1u8) - as u32, + >>::raw_get_const::< + 18usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -833,23 +1216,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 18usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_20(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(19usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<19usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_20(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(19usize, 1u8, val as u64) + self._bitfield_1.set_const::<19usize, 1u8>(val as u64) } } #[inline] @@ -858,8 +1241,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 19usize, 1u8) - as u32, + >>::raw_get_const::< + 19usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -869,23 +1254,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 19usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_21(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(20usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<20usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_21(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(20usize, 1u8, val as u64) + self._bitfield_1.set_const::<20usize, 1u8>(val as u64) } } #[inline] @@ -894,8 +1279,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 20usize, 1u8) - as u32, + >>::raw_get_const::< + 20usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -905,23 +1292,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 20usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_22(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(21usize, 1u8) as u32) } + unsafe { + 
::std::mem::transmute(self._bitfield_1.get_const::<21usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_22(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(21usize, 1u8, val as u64) + self._bitfield_1.set_const::<21usize, 1u8>(val as u64) } } #[inline] @@ -930,8 +1317,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 21usize, 1u8) - as u32, + >>::raw_get_const::< + 21usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -941,23 +1330,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 21usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_23(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(22usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<22usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_23(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(22usize, 1u8, val as u64) + self._bitfield_1.set_const::<22usize, 1u8>(val as u64) } } #[inline] @@ -966,8 +1355,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 22usize, 1u8) - as u32, + >>::raw_get_const::< + 22usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -977,23 +1368,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 22usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_24(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(23usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<23usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_24(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(23usize, 1u8, val as u64) + self._bitfield_1.set_const::<23usize, 1u8>(val as u64) } } #[inline] @@ -1002,8 +1393,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 23usize, 1u8) - as u32, + >>::raw_get_const::< + 23usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1013,23 +1406,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 23usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_25(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(24usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<24usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_25(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(24usize, 1u8, val as u64) + self._bitfield_1.set_const::<24usize, 1u8>(val as u64) } } #[inline] @@ -1038,8 
+1431,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 24usize, 1u8) - as u32, + >>::raw_get_const::< + 24usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1049,23 +1444,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 24usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_26(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(25usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<25usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_26(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(25usize, 1u8, val as u64) + self._bitfield_1.set_const::<25usize, 1u8>(val as u64) } } #[inline] @@ -1074,8 +1469,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 25usize, 1u8) - as u32, + >>::raw_get_const::< + 25usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1085,23 +1482,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 25usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_27(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(26usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<26usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_27(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(26usize, 1u8, val as u64) + self._bitfield_1.set_const::<26usize, 1u8>(val as u64) } } #[inline] @@ -1110,8 +1507,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 26usize, 1u8) - as u32, + >>::raw_get_const::< + 26usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1121,23 +1520,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 26usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_28(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(27usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<27usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_28(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(27usize, 1u8, val as u64) + self._bitfield_1.set_const::<27usize, 1u8>(val as u64) } } #[inline] @@ -1146,8 +1545,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 27usize, 1u8) - as u32, + >>::raw_get_const::< + 27usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1157,23 +1558,23 @@ impl capabilities { let val: u32 = 
::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 27usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_29(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(28usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<28usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_29(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(28usize, 1u8, val as u64) + self._bitfield_1.set_const::<28usize, 1u8>(val as u64) } } #[inline] @@ -1182,8 +1583,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 28usize, 1u8) - as u32, + >>::raw_get_const::< + 28usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1193,23 +1596,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 28usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_30(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(29usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<29usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_30(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(29usize, 1u8, val as u64) + self._bitfield_1.set_const::<29usize, 1u8>(val as u64) } } #[inline] @@ -1218,8 +1621,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 29usize, 1u8) - as u32, + >>::raw_get_const::< + 29usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1229,23 +1634,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 29usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_31(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(30usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<30usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_31(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(30usize, 1u8, val as u64) + self._bitfield_1.set_const::<30usize, 1u8>(val as u64) } } #[inline] @@ -1254,8 +1659,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 30usize, 1u8) - as u32, + >>::raw_get_const::< + 30usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1265,23 +1672,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 30usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_32(&self) -> ::std::os::raw::c_uint { - unsafe { 
::std::mem::transmute(self._bitfield_1.get(31usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<31usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_32(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(31usize, 1u8, val as u64) + self._bitfield_1.set_const::<31usize, 1u8>(val as u64) } } #[inline] @@ -1290,8 +1697,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 31usize, 1u8) - as u32, + >>::raw_get_const::< + 31usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1301,23 +1710,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 31usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_33(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(32usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<32usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_33(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(32usize, 1u8, val as u64) + self._bitfield_1.set_const::<32usize, 1u8>(val as u64) } } #[inline] @@ -1326,8 +1735,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 32usize, 1u8) - as u32, + >>::raw_get_const::< + 32usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1337,23 +1748,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 32usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_34(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(33usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<33usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_34(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(33usize, 1u8, val as u64) + self._bitfield_1.set_const::<33usize, 1u8>(val as u64) } } #[inline] @@ -1362,8 +1773,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 33usize, 1u8) - as u32, + >>::raw_get_const::< + 33usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1373,23 +1786,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 33usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_35(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(34usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<34usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_35(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(34usize, 1u8, val as u64) + 
self._bitfield_1.set_const::<34usize, 1u8>(val as u64) } } #[inline] @@ -1398,8 +1811,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 34usize, 1u8) - as u32, + >>::raw_get_const::< + 34usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1409,23 +1824,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 34usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_36(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(35usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<35usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_36(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(35usize, 1u8, val as u64) + self._bitfield_1.set_const::<35usize, 1u8>(val as u64) } } #[inline] @@ -1434,8 +1849,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 35usize, 1u8) - as u32, + >>::raw_get_const::< + 35usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1445,23 +1862,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 35usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_37(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(36usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<36usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_37(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(36usize, 1u8, val as u64) + self._bitfield_1.set_const::<36usize, 1u8>(val as u64) } } #[inline] @@ -1470,8 +1887,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 36usize, 1u8) - as u32, + >>::raw_get_const::< + 36usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1481,23 +1900,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 36usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_38(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(37usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<37usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_38(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(37usize, 1u8, val as u64) + self._bitfield_1.set_const::<37usize, 1u8>(val as u64) } } #[inline] @@ -1506,8 +1925,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 37usize, 1u8) - as u32, + >>::raw_get_const::< + 37usize, + 1u8, + 
>(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1517,23 +1938,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 37usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_39(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(38usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<38usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_39(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(38usize, 1u8, val as u64) + self._bitfield_1.set_const::<38usize, 1u8>(val as u64) } } #[inline] @@ -1542,8 +1963,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 38usize, 1u8) - as u32, + >>::raw_get_const::< + 38usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1553,23 +1976,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 38usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_40(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(39usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<39usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_40(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(39usize, 1u8, val as u64) + self._bitfield_1.set_const::<39usize, 1u8>(val as u64) } } #[inline] @@ -1578,8 +2001,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 39usize, 1u8) - as u32, + >>::raw_get_const::< + 39usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1589,23 +2014,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 39usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_41(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(40usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<40usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_41(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(40usize, 1u8, val as u64) + self._bitfield_1.set_const::<40usize, 1u8>(val as u64) } } #[inline] @@ -1614,8 +2039,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 40usize, 1u8) - as u32, + >>::raw_get_const::< + 40usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1625,12 +2052,10 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 40usize, 1u8, - val as u64, - ) + 
>(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -1679,374 +2104,333 @@ impl capabilities { ) -> __BindgenBitfieldUnit<[u8; 16usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 16usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let bit_1: u32 = unsafe { ::std::mem::transmute(bit_1) }; - bit_1 as u64 - }, - ); + >({ + let bit_1: u32 = unsafe { ::std::mem::transmute(bit_1) }; + bit_1 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 1u8, - { - let bit_2: u32 = unsafe { ::std::mem::transmute(bit_2) }; - bit_2 as u64 - }, - ); + >({ + let bit_2: u32 = unsafe { ::std::mem::transmute(bit_2) }; + bit_2 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 2usize, 1u8, - { - let bit_3: u32 = unsafe { ::std::mem::transmute(bit_3) }; - bit_3 as u64 - }, - ); + >({ + let bit_3: u32 = unsafe { ::std::mem::transmute(bit_3) }; + bit_3 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 3usize, 1u8, - { - let bit_4: u32 = unsafe { ::std::mem::transmute(bit_4) }; - bit_4 as u64 - }, - ); + >({ + let bit_4: u32 = unsafe { ::std::mem::transmute(bit_4) }; + bit_4 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 4usize, 1u8, - { - let bit_5: u32 = unsafe { ::std::mem::transmute(bit_5) }; - bit_5 as u64 - }, - ); + >({ + let bit_5: u32 = unsafe { ::std::mem::transmute(bit_5) }; + bit_5 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 5usize, 1u8, - { - let bit_6: u32 = unsafe { ::std::mem::transmute(bit_6) }; - bit_6 as u64 - }, - ); + >({ + let bit_6: u32 = unsafe { ::std::mem::transmute(bit_6) }; + bit_6 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 6usize, 1u8, - { - let bit_7: u32 = unsafe { ::std::mem::transmute(bit_7) }; - bit_7 as u64 - }, - ); + >({ + let bit_7: u32 = unsafe { ::std::mem::transmute(bit_7) }; + bit_7 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 7usize, 1u8, - { - let bit_8: u32 = unsafe { ::std::mem::transmute(bit_8) }; - bit_8 as u64 - }, - ); + >({ + let bit_8: u32 = unsafe { ::std::mem::transmute(bit_8) }; + bit_8 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 8usize, 1u8, - { - let bit_9: u32 = unsafe { ::std::mem::transmute(bit_9) }; - bit_9 as u64 - }, - ); + >({ + let bit_9: u32 = unsafe { ::std::mem::transmute(bit_9) }; + bit_9 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 9usize, 1u8, - { - let bit_10: u32 = unsafe { ::std::mem::transmute(bit_10) }; - bit_10 as u64 - }, - ); + >({ + let bit_10: u32 = unsafe { ::std::mem::transmute(bit_10) }; + bit_10 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 10usize, 1u8, - { - let bit_11: u32 = unsafe { ::std::mem::transmute(bit_11) }; - bit_11 as u64 - }, - ); + >({ + let bit_11: u32 = unsafe { ::std::mem::transmute(bit_11) }; + bit_11 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 11usize, 1u8, - { - let bit_12: u32 = unsafe { ::std::mem::transmute(bit_12) }; - bit_12 as u64 - }, - ); + >({ + let bit_12: u32 = unsafe { ::std::mem::transmute(bit_12) }; + bit_12 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 12usize, 1u8, - { - let bit_13: u32 = unsafe { ::std::mem::transmute(bit_13) }; - bit_13 as u64 - }, - ); + >({ + let bit_13: u32 = unsafe { ::std::mem::transmute(bit_13) }; + bit_13 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 13usize, 1u8, - { - let bit_14: u32 = unsafe { ::std::mem::transmute(bit_14) }; - bit_14 as u64 - }, - ); + >({ + let bit_14: u32 = unsafe { 
::std::mem::transmute(bit_14) }; + bit_14 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 14usize, 1u8, - { - let bit_15: u32 = unsafe { ::std::mem::transmute(bit_15) }; - bit_15 as u64 - }, - ); + >({ + let bit_15: u32 = unsafe { ::std::mem::transmute(bit_15) }; + bit_15 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 15usize, 1u8, - { - let bit_16: u32 = unsafe { ::std::mem::transmute(bit_16) }; - bit_16 as u64 - }, - ); + >({ + let bit_16: u32 = unsafe { ::std::mem::transmute(bit_16) }; + bit_16 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 16usize, 1u8, - { - let bit_17: u32 = unsafe { ::std::mem::transmute(bit_17) }; - bit_17 as u64 - }, - ); + >({ + let bit_17: u32 = unsafe { ::std::mem::transmute(bit_17) }; + bit_17 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 17usize, 1u8, - { - let bit_18: u32 = unsafe { ::std::mem::transmute(bit_18) }; - bit_18 as u64 - }, - ); + >({ + let bit_18: u32 = unsafe { ::std::mem::transmute(bit_18) }; + bit_18 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 18usize, 1u8, - { - let bit_19: u32 = unsafe { ::std::mem::transmute(bit_19) }; - bit_19 as u64 - }, - ); + >({ + let bit_19: u32 = unsafe { ::std::mem::transmute(bit_19) }; + bit_19 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 19usize, 1u8, - { - let bit_20: u32 = unsafe { ::std::mem::transmute(bit_20) }; - bit_20 as u64 - }, - ); + >({ + let bit_20: u32 = unsafe { ::std::mem::transmute(bit_20) }; + bit_20 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 20usize, 1u8, - { - let bit_21: u32 = unsafe { ::std::mem::transmute(bit_21) }; - bit_21 as u64 - }, - ); + >({ + let bit_21: u32 = unsafe { ::std::mem::transmute(bit_21) }; + bit_21 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 21usize, 1u8, - { - let bit_22: u32 = unsafe { ::std::mem::transmute(bit_22) }; - bit_22 as u64 - }, - ); + >({ + let bit_22: u32 = unsafe { ::std::mem::transmute(bit_22) }; + bit_22 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 22usize, 1u8, - { - let bit_23: u32 = unsafe { ::std::mem::transmute(bit_23) }; - bit_23 as u64 - }, - ); + >({ + let bit_23: u32 = unsafe { ::std::mem::transmute(bit_23) }; + bit_23 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 23usize, 1u8, - { - let bit_24: u32 = unsafe { ::std::mem::transmute(bit_24) }; - bit_24 as u64 - }, - ); + >({ + let bit_24: u32 = unsafe { ::std::mem::transmute(bit_24) }; + bit_24 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 24usize, 1u8, - { - let bit_25: u32 = unsafe { ::std::mem::transmute(bit_25) }; - bit_25 as u64 - }, - ); + >({ + let bit_25: u32 = unsafe { ::std::mem::transmute(bit_25) }; + bit_25 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 25usize, 1u8, - { - let bit_26: u32 = unsafe { ::std::mem::transmute(bit_26) }; - bit_26 as u64 - }, - ); + >({ + let bit_26: u32 = unsafe { ::std::mem::transmute(bit_26) }; + bit_26 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 26usize, 1u8, - { - let bit_27: u32 = unsafe { ::std::mem::transmute(bit_27) }; - bit_27 as u64 - }, - ); + >({ + let bit_27: u32 = unsafe { ::std::mem::transmute(bit_27) }; + bit_27 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 27usize, 1u8, - { - let bit_28: u32 = unsafe { ::std::mem::transmute(bit_28) }; - bit_28 as u64 - }, - ); + >({ + let bit_28: u32 = unsafe { ::std::mem::transmute(bit_28) }; + bit_28 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 28usize, 1u8, - { - let bit_29: u32 
= unsafe { ::std::mem::transmute(bit_29) }; - bit_29 as u64 - }, - ); + >({ + let bit_29: u32 = unsafe { ::std::mem::transmute(bit_29) }; + bit_29 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 29usize, 1u8, - { - let bit_30: u32 = unsafe { ::std::mem::transmute(bit_30) }; - bit_30 as u64 - }, - ); + >({ + let bit_30: u32 = unsafe { ::std::mem::transmute(bit_30) }; + bit_30 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 30usize, 1u8, - { - let bit_31: u32 = unsafe { ::std::mem::transmute(bit_31) }; - bit_31 as u64 - }, - ); + >({ + let bit_31: u32 = unsafe { ::std::mem::transmute(bit_31) }; + bit_31 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 31usize, 1u8, - { - let bit_32: u32 = unsafe { ::std::mem::transmute(bit_32) }; - bit_32 as u64 - }, - ); + >({ + let bit_32: u32 = unsafe { ::std::mem::transmute(bit_32) }; + bit_32 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 32usize, 1u8, - { - let bit_33: u32 = unsafe { ::std::mem::transmute(bit_33) }; - bit_33 as u64 - }, - ); + >({ + let bit_33: u32 = unsafe { ::std::mem::transmute(bit_33) }; + bit_33 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 33usize, 1u8, - { - let bit_34: u32 = unsafe { ::std::mem::transmute(bit_34) }; - bit_34 as u64 - }, - ); + >({ + let bit_34: u32 = unsafe { ::std::mem::transmute(bit_34) }; + bit_34 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 34usize, 1u8, - { - let bit_35: u32 = unsafe { ::std::mem::transmute(bit_35) }; - bit_35 as u64 - }, - ); + >({ + let bit_35: u32 = unsafe { ::std::mem::transmute(bit_35) }; + bit_35 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 35usize, 1u8, - { - let bit_36: u32 = unsafe { ::std::mem::transmute(bit_36) }; - bit_36 as u64 - }, - ); + >({ + let bit_36: u32 = unsafe { ::std::mem::transmute(bit_36) }; + bit_36 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 36usize, 1u8, - { - let bit_37: u32 = unsafe { ::std::mem::transmute(bit_37) }; - bit_37 as u64 - }, - ); + >({ + let bit_37: u32 = unsafe { ::std::mem::transmute(bit_37) }; + bit_37 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 37usize, 1u8, - { - let bit_38: u32 = unsafe { ::std::mem::transmute(bit_38) }; - bit_38 as u64 - }, - ); + >({ + let bit_38: u32 = unsafe { ::std::mem::transmute(bit_38) }; + bit_38 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 38usize, 1u8, - { - let bit_39: u32 = unsafe { ::std::mem::transmute(bit_39) }; - bit_39 as u64 - }, - ); + >({ + let bit_39: u32 = unsafe { ::std::mem::transmute(bit_39) }; + bit_39 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 39usize, 1u8, - { - let bit_40: u32 = unsafe { ::std::mem::transmute(bit_40) }; - bit_40 as u64 - }, - ); + >({ + let bit_40: u32 = unsafe { ::std::mem::transmute(bit_40) }; + bit_40 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 40usize, 1u8, - { - let bit_41: u32 = unsafe { ::std::mem::transmute(bit_41) }; - bit_41 as u64 - }, - ); + >({ + let bit_41: u32 = unsafe { ::std::mem::transmute(bit_41) }; + bit_41 as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/jsval_layout_opaque.rs b/bindgen-tests/tests/expectations/tests/jsval_layout_opaque.rs index dc0ef8ed7f..041744a2c1 100644 --- a/bindgen-tests/tests/expectations/tests/jsval_layout_opaque.rs +++ b/bindgen-tests/tests/expectations/tests/jsval_layout_opaque.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut 
val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::<Self>(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::<Self>(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if 
cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl<const N: usize> __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const fn get_const< + const BIT_OFFSET: usize, + const BIT_WIDTH: u8, + >(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } } else { - i + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
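To see what the `get_const` fast path above boils down to, here is a minimal sketch of the same arithmetic specialized to the `tag` field of `jsval_layout__bindgen_ty_1` further down (offset 47, width 17), assuming a 64-bit little-endian target; the standalone function is invented for illustration and is not generator output:

// Hypothetical expansion of get_const::<47usize, 17u8> on [u8; 8] storage.
fn sketch_get_tag(storage: [u8; 8]) -> u64 {
    let start_byte = 47 / 8; // byte 5
    let bit_shift = 47 % 8; // 7 bits into that byte
    let bytes_needed = (17 + bit_shift + 7) / 8; // 3 bytes cover bits 40..64
    let mut val = 0usize;
    let mut i = 0;
    while i < bytes_needed {
        val |= (storage[start_byte + i] as usize) << (i * 8);
        i += 1;
    }
    val >>= bit_shift; // discard the trailing bits of payload47
    val &= (1usize << 17) - 1; // keep exactly the 17 field bits
    val as u64
}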
+ #[inline] + pub fn set_const< + const BIT_OFFSET: usize, + const BIT_WIDTH: u8, + >(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
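As a quick usage sketch (the two-byte field layout here is invented, and the unit's usual `new` constructor is assumed from the untouched part of this prelude), `set_const` and `get_const` round-trip a value without disturbing neighbouring bits:

let mut unit = __BindgenBitfieldUnit::<[u8; 2]>::new([0u8; 2]);
unit.set_const::<3usize, 7u8>(0b101_1010); // 7-bit field at bit offset 3
assert_eq!(unit.get_const::<3usize, 7u8>(), 0b101_1010);
assert_eq!(unit.get_const::<0usize, 3u8>(), 0); // the 3 low bits stay clear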
+ #[inline] + pub const unsafe fn raw_get_const< + const BIT_OFFSET: usize, + const BIT_WIDTH: u8, + >( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
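The payoff shows up in the raw accessors rewritten below: each call site now names its offset and width as const generic arguments instead of passing them at run time. Schematically (the free-function wrapper is hypothetical; its body mirrors the `tag_raw` change that follows):

// Shape of a generated raw getter after this change, per the tag_raw hunk below.
unsafe fn read_tag_bits(this: *const jsval_layout__bindgen_ty_1) -> u32 {
    <__BindgenBitfieldUnit<[u8; 8usize]>>::raw_get_const::<47usize, 17u8>(
        ::std::ptr::addr_of!((*this)._bitfield_1),
    ) as u32
}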
+ #[inline] + pub unsafe fn raw_set_const< + const BIT_OFFSET: usize, + const BIT_WIDTH: u8, + >( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::<u8>(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } @@ -269,13 +612,15 @@ impl Default for jsval_layout__bindgen_ty_1 { impl jsval_layout__bindgen_ty_1 { #[inline] pub fn payload47(&self) -> u64 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 47u8) as u64) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 47u8>() as u64) + } } #[inline] pub fn set_payload47(&mut self, val: u64) { unsafe { let val: u64 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 47u8, val as u64) + self._bitfield_1.set_const::<0usize, 47u8>(val as u64) } } #[inline] @@ -284,8 +629,10 @@ impl jsval_layout__bindgen_ty_1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 8usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 47u8) - as u64, + >>::raw_get_const::< + 0usize, + 47u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u64, ) } } @@ -295,23 +642,23 @@ impl jsval_layout__bindgen_ty_1 { let val: u64 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 8usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 47u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn tag(&self) -> JSValueTag { - unsafe { ::std::mem::transmute(self._bitfield_1.get(47usize, 17u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<47usize, 17u8>() as u32) + } } #[inline] pub fn set_tag(&mut self, val: 
JSValueTag) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(47usize, 17u8, val as u64) + self._bitfield_1.set_const::<47usize, 17u8>(val as u64) } } #[inline] @@ -320,8 +667,10 @@ impl jsval_layout__bindgen_ty_1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 8usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 47usize, 17u8) - as u32, + >>::raw_get_const::< + 47usize, + 17u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -331,12 +680,10 @@ impl jsval_layout__bindgen_ty_1 { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 8usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 47usize, 17u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -346,23 +693,21 @@ impl jsval_layout__bindgen_ty_1 { ) -> __BindgenBitfieldUnit<[u8; 8usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 47u8, - { - let payload47: u64 = unsafe { ::std::mem::transmute(payload47) }; - payload47 as u64 - }, - ); + >({ + let payload47: u64 = unsafe { ::std::mem::transmute(payload47) }; + payload47 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 47usize, 17u8, - { - let tag: u32 = unsafe { ::std::mem::transmute(tag) }; - tag as u64 - }, - ); + >({ + let tag: u32 = unsafe { ::std::mem::transmute(tag) }; + tag as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/layout_align.rs b/bindgen-tests/tests/expectations/tests/layout_align.rs index a942adb8f2..6439b78a3a 100644 --- a/bindgen-tests/tests/expectations/tests/layout_align.rs +++ b/bindgen-tests/tests/expectations/tests/layout_align.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,17 +108,32 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::<Self>(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= 
(byte.reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::<Self>(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl<const N: usize> __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
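Before the const-generic getter below, note how it and the runtime paths above handle big-endian targets: each storage byte is normalized with `reverse_bits()` so that a single shift-and-mask recipe serves both endiannesses, and the extracted value is un-reversed at the end. In miniature, for one byte holding a 3-bit field in its top bits (values invented for illustration):

let byte: u8 = 0b1100_0000; // field value 0b110 stored MSB-first
let norm = byte.reverse_bits(); // field moves to the low bits, order flipped
let raw = norm & 0b111; // 0b011: correct bits, reversed order
assert_eq!(raw.reverse_bits() >> (8 - 3), 0b110); // final reversal restores it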
+ #[inline] + pub const fn get_const< + const BIT_OFFSET: usize, + const BIT_WIDTH: u8, + >(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } } else { - i + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const< + const BIT_OFFSET: usize, + const BIT_WIDTH: u8, + >(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | 
(byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const< + const BIT_OFFSET: usize, + const BIT_WIDTH: u8, + >( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
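`raw_set_const` below shares the setters' byte-wise read-modify-write: `field_mask` marks which bits of each byte belong to the field, so a neighbouring field packed into the same byte survives the write. With invented values for a single byte:

let old: u8 = 0b1111_0000; // high nibble belongs to some other field
let byte_val: u8 = 0b0000_1010; // new field bits destined for this byte
let byte_mask: u8 = 0b0000_1111; // the field occupies the low nibble here
let new = (old & !byte_mask) | (byte_val & byte_mask);
assert_eq!(new, 0b1111_1010); // the other field's nibble is untouched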
+ #[inline] + pub unsafe fn raw_set_const< + const BIT_OFFSET: usize, + const BIT_WIDTH: u8, + >( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::<u8>(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift } else { - i + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } @@ -238,13 +581,15 @@ const _: () = { impl rte_eth_link { #[inline] pub fn link_duplex(&self) -> u16 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u16) + } } #[inline] pub fn set_link_duplex(&mut self, val: u16) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -253,8 +598,10 @@ impl rte_eth_link { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) - as u16, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -264,23 +611,23 @@ impl rte_eth_link { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn link_autoneg(&self) -> u16 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 1u8>() as u16) + } } #[inline] pub fn set_link_autoneg(&mut self, val: u16) 
{ unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(1usize, 1u8, val as u64) + self._bitfield_1.set_const::<1usize, 1u8>(val as u64) } } #[inline] @@ -289,8 +636,10 @@ impl rte_eth_link { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 1u8) - as u16, + >>::raw_get_const::< + 1usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -300,23 +649,23 @@ impl rte_eth_link { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn link_status(&self) -> u16 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<2usize, 1u8>() as u16) + } } #[inline] pub fn set_link_status(&mut self, val: u16) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(2usize, 1u8, val as u64) + self._bitfield_1.set_const::<2usize, 1u8>(val as u64) } } #[inline] @@ -325,8 +674,10 @@ impl rte_eth_link { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 2usize, 1u8) - as u16, + >>::raw_get_const::< + 2usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -336,12 +687,10 @@ impl rte_eth_link { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 2usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -352,34 +701,29 @@ impl rte_eth_link { ) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let link_duplex: u16 = unsafe { ::std::mem::transmute(link_duplex) }; - link_duplex as u64 - }, - ); + >({ + let link_duplex: u16 = unsafe { ::std::mem::transmute(link_duplex) }; + link_duplex as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 1u8, - { - let link_autoneg: u16 = unsafe { - ::std::mem::transmute(link_autoneg) - }; - link_autoneg as u64 - }, - ); + >({ + let link_autoneg: u16 = unsafe { ::std::mem::transmute(link_autoneg) }; + link_autoneg as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 2usize, 1u8, - { - let link_status: u16 = unsafe { ::std::mem::transmute(link_status) }; - link_status as u64 - }, - ); + >({ + let link_status: u16 = unsafe { ::std::mem::transmute(link_status) }; + link_status as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/layout_eth_conf.rs b/bindgen-tests/tests/expectations/tests/layout_eth_conf.rs index 7d975cd979..ebc22a6398 100644 --- a/bindgen-tests/tests/expectations/tests/layout_eth_conf.rs +++ b/bindgen-tests/tests/expectations/tests/layout_eth_conf.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = 
self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,17 +108,32 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::<Self>(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::<Self>(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } 
+ let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl<const N: usize> __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const fn get_const< + const BIT_OFFSET: usize, + const BIT_WIDTH: u8, + >(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
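The `usize` fast path in the methods above and below is really about 32-bit targets, where `usize::BITS` is 32: a field whose shifted window fits the native word keeps all arithmetic in `usize`, while anything wider takes the `u64` branch. The selection test, pulled out as a sketch (the helper name is invented):

// Same condition as `BIT_WIDTH as usize + bit_shift <= usize::BITS as usize`.
fn uses_native_word(bit_width: u8, bit_shift: usize) -> bool {
    // On a 32-bit target: width 17 at shift 7 (24 bits) stays native, while
    // width 47 at shift 0 falls back to u64. On 64-bit, nearly all fields fit.
    bit_width as usize + bit_shift <= usize::BITS as usize
}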
+ #[inline] + pub fn set_const< + const BIT_OFFSET: usize, + const BIT_WIDTH: u8, + >(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub const unsafe fn raw_get_const< + const BIT_OFFSET: usize, + const BIT_WIDTH: u8, + >( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
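Generated constructors pick these methods up too: as in `new_bitfield_1` for `rte_eth_link` earlier in this patch, fields are written one `set_const` call at a time onto a default-initialized unit. Sketched here with the `rte_eth_rxmode` fields from this file:

let mut unit: __BindgenBitfieldUnit<[u8; 2usize]> = Default::default();
unit.set_const::<0usize, 1u8>(1); // header_split
unit.set_const::<1usize, 1u8>(1); // hw_ip_checksum
unit.set_const::<2usize, 1u8>(0); // hw_vlan_filter
assert_eq!(unit.get_const::<1usize, 1u8>(), 1);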
+ #[inline] + pub unsafe fn raw_set_const< + const BIT_OFFSET: usize, + const BIT_WIDTH: u8, + >( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::<u8>(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift } else { - i + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } @@ -237,13 +580,15 @@ impl Default for rte_eth_rxmode { impl rte_eth_rxmode { #[inline] pub fn header_split(&self) -> u16 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u16) + } } #[inline] pub fn set_header_split(&mut self, val: u16) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -252,8 +597,10 @@ impl rte_eth_rxmode { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) - as u16, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -263,23 +610,23 @@ impl rte_eth_rxmode { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn hw_ip_checksum(&self) -> u16 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 1u8>() as u16) + } } #[inline] pub fn 
set_hw_ip_checksum(&mut self, val: u16) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(1usize, 1u8, val as u64) + self._bitfield_1.set_const::<1usize, 1u8>(val as u64) } } #[inline] @@ -288,8 +635,10 @@ impl rte_eth_rxmode { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 1u8) - as u16, + >>::raw_get_const::< + 1usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -299,23 +648,23 @@ impl rte_eth_rxmode { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn hw_vlan_filter(&self) -> u16 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<2usize, 1u8>() as u16) + } } #[inline] pub fn set_hw_vlan_filter(&mut self, val: u16) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(2usize, 1u8, val as u64) + self._bitfield_1.set_const::<2usize, 1u8>(val as u64) } } #[inline] @@ -324,8 +673,10 @@ impl rte_eth_rxmode { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 2usize, 1u8) - as u16, + >>::raw_get_const::< + 2usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -335,23 +686,23 @@ impl rte_eth_rxmode { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 2usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn hw_vlan_strip(&self) -> u16 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(3usize, 1u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<3usize, 1u8>() as u16) + } } #[inline] pub fn set_hw_vlan_strip(&mut self, val: u16) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(3usize, 1u8, val as u64) + self._bitfield_1.set_const::<3usize, 1u8>(val as u64) } } #[inline] @@ -360,8 +711,10 @@ impl rte_eth_rxmode { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 3usize, 1u8) - as u16, + >>::raw_get_const::< + 3usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -371,23 +724,23 @@ impl rte_eth_rxmode { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 3usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn hw_vlan_extend(&self) -> u16 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(4usize, 1u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<4usize, 1u8>() as u16) + } } #[inline] pub fn set_hw_vlan_extend(&mut self, val: u16) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(4usize, 1u8, val as u64) + self._bitfield_1.set_const::<4usize, 1u8>(val as u64) } } #[inline] @@ -396,8 +749,10 @@ impl rte_eth_rxmode { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 4usize, 1u8) - as u16, + >>::raw_get_const::< + 4usize, + 
1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -407,23 +762,23 @@ impl rte_eth_rxmode { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 4usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn jumbo_frame(&self) -> u16 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(5usize, 1u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<5usize, 1u8>() as u16) + } } #[inline] pub fn set_jumbo_frame(&mut self, val: u16) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(5usize, 1u8, val as u64) + self._bitfield_1.set_const::<5usize, 1u8>(val as u64) } } #[inline] @@ -432,8 +787,10 @@ impl rte_eth_rxmode { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 5usize, 1u8) - as u16, + >>::raw_get_const::< + 5usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -443,23 +800,23 @@ impl rte_eth_rxmode { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 5usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn hw_strip_crc(&self) -> u16 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(6usize, 1u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<6usize, 1u8>() as u16) + } } #[inline] pub fn set_hw_strip_crc(&mut self, val: u16) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(6usize, 1u8, val as u64) + self._bitfield_1.set_const::<6usize, 1u8>(val as u64) } } #[inline] @@ -468,8 +825,10 @@ impl rte_eth_rxmode { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 6usize, 1u8) - as u16, + >>::raw_get_const::< + 6usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -479,23 +838,23 @@ impl rte_eth_rxmode { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 6usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn enable_scatter(&self) -> u16 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(7usize, 1u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<7usize, 1u8>() as u16) + } } #[inline] pub fn set_enable_scatter(&mut self, val: u16) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(7usize, 1u8, val as u64) + self._bitfield_1.set_const::<7usize, 1u8>(val as u64) } } #[inline] @@ -504,8 +863,10 @@ impl rte_eth_rxmode { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 7usize, 1u8) - as u16, + >>::raw_get_const::< + 7usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -515,23 +876,23 @@ impl rte_eth_rxmode { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 7usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn enable_lro(&self) -> u16 { - unsafe { 
::std::mem::transmute(self._bitfield_1.get(8usize, 1u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<8usize, 1u8>() as u16) + } } #[inline] pub fn set_enable_lro(&mut self, val: u16) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(8usize, 1u8, val as u64) + self._bitfield_1.set_const::<8usize, 1u8>(val as u64) } } #[inline] @@ -540,8 +901,10 @@ impl rte_eth_rxmode { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 8usize, 1u8) - as u16, + >>::raw_get_const::< + 8usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -551,12 +914,10 @@ impl rte_eth_rxmode { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 8usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -573,100 +934,85 @@ impl rte_eth_rxmode { ) -> __BindgenBitfieldUnit<[u8; 2usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 2usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let header_split: u16 = unsafe { - ::std::mem::transmute(header_split) - }; - header_split as u64 - }, - ); + >({ + let header_split: u16 = unsafe { ::std::mem::transmute(header_split) }; + header_split as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 1u8, - { - let hw_ip_checksum: u16 = unsafe { - ::std::mem::transmute(hw_ip_checksum) - }; - hw_ip_checksum as u64 - }, - ); + >({ + let hw_ip_checksum: u16 = unsafe { + ::std::mem::transmute(hw_ip_checksum) + }; + hw_ip_checksum as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 2usize, 1u8, - { - let hw_vlan_filter: u16 = unsafe { - ::std::mem::transmute(hw_vlan_filter) - }; - hw_vlan_filter as u64 - }, - ); + >({ + let hw_vlan_filter: u16 = unsafe { + ::std::mem::transmute(hw_vlan_filter) + }; + hw_vlan_filter as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 3usize, 1u8, - { - let hw_vlan_strip: u16 = unsafe { - ::std::mem::transmute(hw_vlan_strip) - }; - hw_vlan_strip as u64 - }, - ); + >({ + let hw_vlan_strip: u16 = unsafe { ::std::mem::transmute(hw_vlan_strip) }; + hw_vlan_strip as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 4usize, 1u8, - { - let hw_vlan_extend: u16 = unsafe { - ::std::mem::transmute(hw_vlan_extend) - }; - hw_vlan_extend as u64 - }, - ); + >({ + let hw_vlan_extend: u16 = unsafe { + ::std::mem::transmute(hw_vlan_extend) + }; + hw_vlan_extend as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 5usize, 1u8, - { - let jumbo_frame: u16 = unsafe { ::std::mem::transmute(jumbo_frame) }; - jumbo_frame as u64 - }, - ); + >({ + let jumbo_frame: u16 = unsafe { ::std::mem::transmute(jumbo_frame) }; + jumbo_frame as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 6usize, 1u8, - { - let hw_strip_crc: u16 = unsafe { - ::std::mem::transmute(hw_strip_crc) - }; - hw_strip_crc as u64 - }, - ); + >({ + let hw_strip_crc: u16 = unsafe { ::std::mem::transmute(hw_strip_crc) }; + hw_strip_crc as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 7usize, 1u8, - { - let enable_scatter: u16 = unsafe { - ::std::mem::transmute(enable_scatter) - }; - enable_scatter as u64 - }, - ); + >({ + let enable_scatter: u16 = unsafe { + ::std::mem::transmute(enable_scatter) + }; + enable_scatter as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 8usize, 1u8, - 
{ - let enable_lro: u16 = unsafe { ::std::mem::transmute(enable_lro) }; - enable_lro as u64 - }, - ); + >({ + let enable_lro: u16 = unsafe { ::std::mem::transmute(enable_lro) }; + enable_lro as u64 + }); __bindgen_bitfield_unit } } @@ -717,13 +1063,15 @@ impl Default for rte_eth_txmode { impl rte_eth_txmode { #[inline] pub fn hw_vlan_reject_tagged(&self) -> u8 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u8) + } } #[inline] pub fn set_hw_vlan_reject_tagged(&mut self, val: u8) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -732,7 +1080,10 @@ impl rte_eth_txmode { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) as u8, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -742,23 +1093,23 @@ impl rte_eth_txmode { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn hw_vlan_reject_untagged(&self) -> u8 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 1u8>() as u8) + } } #[inline] pub fn set_hw_vlan_reject_untagged(&mut self, val: u8) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(1usize, 1u8, val as u64) + self._bitfield_1.set_const::<1usize, 1u8>(val as u64) } } #[inline] @@ -767,7 +1118,10 @@ impl rte_eth_txmode { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 1u8) as u8, + >>::raw_get_const::< + 1usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -777,23 +1131,23 @@ impl rte_eth_txmode { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn hw_vlan_insert_pvid(&self) -> u8 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<2usize, 1u8>() as u8) + } } #[inline] pub fn set_hw_vlan_insert_pvid(&mut self, val: u8) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(2usize, 1u8, val as u64) + self._bitfield_1.set_const::<2usize, 1u8>(val as u64) } } #[inline] @@ -802,7 +1156,10 @@ impl rte_eth_txmode { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 2usize, 1u8) as u8, + >>::raw_get_const::< + 2usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -812,12 +1169,10 @@ impl rte_eth_txmode { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 2usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -828,38 +1183,35 @@ impl rte_eth_txmode { ) -> __BindgenBitfieldUnit<[u8; 1usize]> { 
let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let hw_vlan_reject_tagged: u8 = unsafe { - ::std::mem::transmute(hw_vlan_reject_tagged) - }; - hw_vlan_reject_tagged as u64 - }, - ); + >({ + let hw_vlan_reject_tagged: u8 = unsafe { + ::std::mem::transmute(hw_vlan_reject_tagged) + }; + hw_vlan_reject_tagged as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 1u8, - { - let hw_vlan_reject_untagged: u8 = unsafe { - ::std::mem::transmute(hw_vlan_reject_untagged) - }; - hw_vlan_reject_untagged as u64 - }, - ); + >({ + let hw_vlan_reject_untagged: u8 = unsafe { + ::std::mem::transmute(hw_vlan_reject_untagged) + }; + hw_vlan_reject_untagged as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 2usize, 1u8, - { - let hw_vlan_insert_pvid: u8 = unsafe { - ::std::mem::transmute(hw_vlan_insert_pvid) - }; - hw_vlan_insert_pvid as u64 - }, - ); + >({ + let hw_vlan_insert_pvid: u8 = unsafe { + ::std::mem::transmute(hw_vlan_insert_pvid) + }; + hw_vlan_insert_pvid as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/layout_mbuf.rs b/bindgen-tests/tests/expectations/tests/layout_mbuf.rs index ce6c58e39e..270a22a0c0 100644 --- a/bindgen-tests/tests/expectations/tests/layout_mbuf.rs +++ b/bindgen-tests/tests/expectations/tests/layout_mbuf.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( 
(bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(),
         );
-        for i in 0..(bit_width as usize) {
-            let mask = 1 << i;
-            let val_bit_is_set = val & mask == mask;
-            let index = if cfg!(target_endian = "big") {
-                bit_width as usize - 1 - i
+        if bit_width == 0 {
+            return;
+        }
+        let mut val = val;
+        if bit_width < 64 {
+            val &= (1u64 << bit_width) - 1;
+        }
+        if cfg!(target_endian = "big") {
+            val = val.reverse_bits() >> (64 - bit_width as usize);
+        }
+        let storage = self.storage.as_mut();
+        let start_byte = bit_offset / 8;
+        let bit_shift = bit_offset % 8;
+        let bytes_needed = (bit_width as usize + bit_shift + 7) / 8;
+        val <<= bit_shift;
+        let field_mask = if bit_width as usize + bit_shift >= 64 {
+            !0u64 << bit_shift
+        } else {
+            ((1u64 << bit_width) - 1) << bit_shift
+        };
+        for i in 0..bytes_needed {
+            let byte_val = (val >> (i * 8)) as u8;
+            let byte_mask = (field_mask >> (i * 8)) as u8;
+            if cfg!(target_endian = "big") {
+                let byte = storage[start_byte + i].reverse_bits();
+                let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                storage[start_byte + i] = new_byte.reverse_bits();
             } else {
-                i
-            };
-            self.set_bit(index + bit_offset, val_bit_is_set);
+                storage[start_byte + i] = (storage[start_byte + i] & !byte_mask)
+                    | (byte_val & byte_mask);
+            }
         }
     }
     #[inline]
@@ -133,15 +183,308 @@ where
         debug_assert!(
             (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::<Self>(),
         );
-        for i in 0..(bit_width as usize) {
-            let mask = 1 << i;
-            let val_bit_is_set = val & mask == mask;
-            let index = if cfg!(target_endian = "big") {
-                bit_width as usize - 1 - i
+        if bit_width == 0 {
+            return;
+        }
+        let mut val = val;
+        if bit_width < 64 {
+            val &= (1u64 << bit_width) - 1;
+        }
+        if cfg!(target_endian = "big") {
+            val = val.reverse_bits() >> (64 - bit_width as usize);
+        }
+        let start_byte = bit_offset / 8;
+        let bit_shift = bit_offset % 8;
+        let bytes_needed = (bit_width as usize + bit_shift + 7) / 8;
+        val <<= bit_shift;
+        let field_mask = if bit_width as usize + bit_shift >= 64 {
+            !0u64 << bit_shift
+        } else {
+            ((1u64 << bit_width) - 1) << bit_shift
+        };
+        let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 };
+        for i in 0..bytes_needed {
+            let byte_val = (val >> (i * 8)) as u8;
+            let byte_mask = (field_mask >> (i * 8)) as u8;
+            let byte_ptr = unsafe { storage_ptr.add(start_byte + i) };
+            if cfg!(target_endian = "big") {
+                let byte = unsafe { (*byte_ptr).reverse_bits() };
+                let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                unsafe { *byte_ptr = new_byte.reverse_bits() };
+            } else {
+                unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) };
+            }
+        }
+    }
+}
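Note on the `reverse_bits` branches above: they are how one code path serves both byte orders. A hedged sketch of why the trick works (`extract_be` is illustrative only):

```rust
// On big-endian targets bitfield bit 0 is the MSB of byte 0. Reversing
// each byte renumbers its bits LSB-first, so the little-endian
// shift-and-mask arithmetic applies unchanged; one final reverse_bits
// puts the extracted field back into value order.
fn extract_be(storage: &[u8], bit_offset: usize, bit_width: u8) -> u64 {
    let start_byte = bit_offset / 8;
    let bit_shift = bit_offset % 8;
    let bytes_needed = (bit_width as usize + bit_shift + 7) / 8;
    let mut val = 0u64;
    for i in 0..bytes_needed {
        val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8);
    }
    val >>= bit_shift;
    if bit_width < 64 {
        val &= (1u64 << bit_width) - 1;
    }
    // The field's bits are still reversed; undo that in one step.
    val.reverse_bits() >> (64 - bit_width as usize)
}
```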
+/// Const-generic methods for efficient bitfield access when offset and width
+/// are known at compile time.
+impl<const N: usize> __BindgenBitfieldUnit<[u8; N]> {
+    /// Get a field using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub const fn get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&self) -> u64 {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return 0;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = 0usize;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val
+                        |= (self.storage[start_byte + i].reverse_bits() as usize)
+                            << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i] as usize) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val as u64
+        } else {
+            let mut val = 0u64;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val
+                        |= (self.storage[start_byte + i].reverse_bits() as u64)
+                            << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i] as u64) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val
+        }
+    }
+    /// Set a field using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub fn set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&mut self, val: u64) {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = val as usize;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift;
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                if cfg!(target_endian = "big") {
+                    let byte = self.storage[start_byte + i].reverse_bits();
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    self.storage[start_byte + i] = new_byte.reverse_bits();
+                } else {
+                    self.storage[start_byte + i] = (self.storage[start_byte + i]
+                        & !byte_mask) | (byte_val & byte_mask);
+                }
+                i += 1;
+            }
+        } else {
+            let mut val = val;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 {
+                !0u64 << bit_shift
+            } else {
+                ((1u64 << BIT_WIDTH) - 1) << bit_shift
+            };
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                if cfg!(target_endian = "big") {
+                    let byte = self.storage[start_byte + i].reverse_bits();
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    self.storage[start_byte + i] = new_byte.reverse_bits();
+                } else {
+                    self.storage[start_byte + i] = (self.storage[start_byte + i]
+                        & !byte_mask) | (byte_val & byte_mask);
+                }
+                i += 1;
+            }
+        }
+    }
+    /// Raw pointer get using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub const unsafe fn raw_get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        this: *const Self,
+    ) -> u64 {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return 0;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 };
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = 0usize;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte.reverse_bits() as usize) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte as usize) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val as u64
+        } else {
+            let mut val = 0u64;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte.reverse_bits() as u64) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte as u64) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val
+        }
+    }
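Note on the raw accessors: they take `*const Self` / `*mut Self` instead of references because the bitfield unit may live in a packed struct. A minimal sketch of the pattern, assuming a hypothetical `Unit` type (the real callers pass `addr_of!((*this)._bitfield_1)`):

```rust
use core::ptr::addr_of;

// Hypothetical stand-in for a generated packed struct.
#[repr(C, packed)]
struct Unit {
    storage: [u8; 7],
}

unsafe fn first_byte(this: *const Unit) -> u8 {
    // addr_of! computes the field address without materializing an
    // intermediate (potentially misaligned) reference, which would be UB.
    let p = unsafe { addr_of!((*this).storage) as *const u8 };
    unsafe { *p }
}
```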
+    /// Raw pointer set using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub unsafe fn raw_set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        this: *mut Self,
+        val: u64,
+    ) {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        let storage_ptr = this.cast::<[u8; N]>().cast::<u8>();
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = val as usize;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift;
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                let byte_ptr = unsafe { storage_ptr.add(start_byte + i) };
+                if cfg!(target_endian = "big") {
+                    let byte = unsafe { (*byte_ptr).reverse_bits() };
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    unsafe { *byte_ptr = new_byte.reverse_bits() };
+                } else {
+                    unsafe {
+                        *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask)
+                    };
+                }
+                i += 1;
+            }
+        } else {
+            let mut val = val;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 {
+                !0u64 << bit_shift
             } else {
-                i
+                ((1u64 << BIT_WIDTH) - 1) << bit_shift
             };
-            unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) };
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                let byte_ptr = unsafe { storage_ptr.add(start_byte + i) };
+                if cfg!(target_endian = "big") {
+                    let byte = unsafe { (*byte_ptr).reverse_bits() };
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    unsafe { *byte_ptr = new_byte.reverse_bits() };
+                } else {
+                    unsafe {
+                        *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask)
+                    };
+                }
+                i += 1;
+            }
         }
     }
 }
@@ -276,13 +619,15 @@ const _: () = {
 impl rte_mbuf__bindgen_ty_2__bindgen_ty_1 {
     #[inline]
     pub fn l2_type(&self) -> u32 {
-        unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 4u8) as u32) }
+        unsafe {
+            ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 4u8>() as u32)
+        }
     }
     #[inline]
     pub fn set_l2_type(&mut self, val: u32) {
         unsafe {
             let val: u32 = ::std::mem::transmute(val);
-            self._bitfield_1.set(0usize, 4u8, val as u64)
+            self._bitfield_1.set_const::<0usize, 4u8>(val as u64)
         }
     }
     #[inline]
@@ -291,8 +636,10 @@ impl rte_mbuf__bindgen_ty_2__bindgen_ty_1 {
             ::std::mem::transmute(
                 <__BindgenBitfieldUnit<
                     [u8; 4usize],
-                >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 4u8)
-                    as u32,
+                >>::raw_get_const::<
+                    0usize,
+                    4u8,
+                >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32,
             )
         }
     }
@@ -302,23 +649,23 @@ impl rte_mbuf__bindgen_ty_2__bindgen_ty_1 {
             let val: u32 = ::std::mem::transmute(val);
             <__BindgenBitfieldUnit<
                 [u8; 4usize],
-            >>::raw_set(
-                ::std::ptr::addr_of_mut!((*this)._bitfield_1),
+            >>::raw_set_const::<
                 0usize,
                 4u8,
-                val as u64,
-            )
+            >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64)
         }
     }
     #[inline]
     pub fn l3_type(&self) -> u32 {
-        unsafe { ::std::mem::transmute(self._bitfield_1.get(4usize, 4u8) as u32) }
+        unsafe {
+            ::std::mem::transmute(self._bitfield_1.get_const::<4usize, 4u8>() as u32)
+ } } #[inline] pub fn set_l3_type(&mut self, val: u32) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(4usize, 4u8, val as u64) + self._bitfield_1.set_const::<4usize, 4u8>(val as u64) } } #[inline] @@ -327,8 +674,10 @@ impl rte_mbuf__bindgen_ty_2__bindgen_ty_1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 4usize, 4u8) - as u32, + >>::raw_get_const::< + 4usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -338,23 +687,23 @@ impl rte_mbuf__bindgen_ty_2__bindgen_ty_1 { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 4usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn l4_type(&self) -> u32 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(8usize, 4u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<8usize, 4u8>() as u32) + } } #[inline] pub fn set_l4_type(&mut self, val: u32) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(8usize, 4u8, val as u64) + self._bitfield_1.set_const::<8usize, 4u8>(val as u64) } } #[inline] @@ -363,8 +712,10 @@ impl rte_mbuf__bindgen_ty_2__bindgen_ty_1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 8usize, 4u8) - as u32, + >>::raw_get_const::< + 8usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -374,23 +725,23 @@ impl rte_mbuf__bindgen_ty_2__bindgen_ty_1 { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 8usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn tun_type(&self) -> u32 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(12usize, 4u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<12usize, 4u8>() as u32) + } } #[inline] pub fn set_tun_type(&mut self, val: u32) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(12usize, 4u8, val as u64) + self._bitfield_1.set_const::<12usize, 4u8>(val as u64) } } #[inline] @@ -399,8 +750,10 @@ impl rte_mbuf__bindgen_ty_2__bindgen_ty_1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 12usize, 4u8) - as u32, + >>::raw_get_const::< + 12usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -410,23 +763,23 @@ impl rte_mbuf__bindgen_ty_2__bindgen_ty_1 { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 12usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn inner_l2_type(&self) -> u32 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(16usize, 4u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<16usize, 4u8>() as u32) + } } #[inline] pub fn set_inner_l2_type(&mut self, val: u32) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(16usize, 4u8, val as u64) + self._bitfield_1.set_const::<16usize, 4u8>(val as u64) } } #[inline] @@ -435,8 +788,10 @@ impl rte_mbuf__bindgen_ty_2__bindgen_ty_1 { 
::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 16usize, 4u8) - as u32, + >>::raw_get_const::< + 16usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -446,23 +801,23 @@ impl rte_mbuf__bindgen_ty_2__bindgen_ty_1 { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 16usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn inner_l3_type(&self) -> u32 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(20usize, 4u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<20usize, 4u8>() as u32) + } } #[inline] pub fn set_inner_l3_type(&mut self, val: u32) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(20usize, 4u8, val as u64) + self._bitfield_1.set_const::<20usize, 4u8>(val as u64) } } #[inline] @@ -471,8 +826,10 @@ impl rte_mbuf__bindgen_ty_2__bindgen_ty_1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 20usize, 4u8) - as u32, + >>::raw_get_const::< + 20usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -482,23 +839,23 @@ impl rte_mbuf__bindgen_ty_2__bindgen_ty_1 { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 20usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn inner_l4_type(&self) -> u32 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(24usize, 4u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<24usize, 4u8>() as u32) + } } #[inline] pub fn set_inner_l4_type(&mut self, val: u32) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(24usize, 4u8, val as u64) + self._bitfield_1.set_const::<24usize, 4u8>(val as u64) } } #[inline] @@ -507,8 +864,10 @@ impl rte_mbuf__bindgen_ty_2__bindgen_ty_1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 24usize, 4u8) - as u32, + >>::raw_get_const::< + 24usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -518,12 +877,10 @@ impl rte_mbuf__bindgen_ty_2__bindgen_ty_1 { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 24usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -538,74 +895,61 @@ impl rte_mbuf__bindgen_ty_2__bindgen_ty_1 { ) -> __BindgenBitfieldUnit<[u8; 4usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 4u8, - { - let l2_type: u32 = unsafe { ::std::mem::transmute(l2_type) }; - l2_type as u64 - }, - ); + >({ + let l2_type: u32 = unsafe { ::std::mem::transmute(l2_type) }; + l2_type as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 4usize, 4u8, - { - let l3_type: u32 = unsafe { ::std::mem::transmute(l3_type) }; - l3_type as u64 - }, - ); + >({ + let l3_type: u32 = unsafe { ::std::mem::transmute(l3_type) }; + l3_type as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 8usize, 4u8, - { - let l4_type: u32 = 
unsafe { ::std::mem::transmute(l4_type) }; - l4_type as u64 - }, - ); + >({ + let l4_type: u32 = unsafe { ::std::mem::transmute(l4_type) }; + l4_type as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 12usize, 4u8, - { - let tun_type: u32 = unsafe { ::std::mem::transmute(tun_type) }; - tun_type as u64 - }, - ); + >({ + let tun_type: u32 = unsafe { ::std::mem::transmute(tun_type) }; + tun_type as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 16usize, 4u8, - { - let inner_l2_type: u32 = unsafe { - ::std::mem::transmute(inner_l2_type) - }; - inner_l2_type as u64 - }, - ); + >({ + let inner_l2_type: u32 = unsafe { ::std::mem::transmute(inner_l2_type) }; + inner_l2_type as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 20usize, 4u8, - { - let inner_l3_type: u32 = unsafe { - ::std::mem::transmute(inner_l3_type) - }; - inner_l3_type as u64 - }, - ); + >({ + let inner_l3_type: u32 = unsafe { ::std::mem::transmute(inner_l3_type) }; + inner_l3_type as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 24usize, 4u8, - { - let inner_l4_type: u32 = unsafe { - ::std::mem::transmute(inner_l4_type) - }; - inner_l4_type as u64 - }, - ); + >({ + let inner_l4_type: u32 = unsafe { ::std::mem::transmute(inner_l4_type) }; + inner_l4_type as u64 + }); __bindgen_bitfield_unit } } @@ -837,13 +1181,15 @@ const _: () = { impl rte_mbuf__bindgen_ty_5__bindgen_ty_1 { #[inline] pub fn l2_len(&self) -> u64 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 7u8) as u64) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 7u8>() as u64) + } } #[inline] pub fn set_l2_len(&mut self, val: u64) { unsafe { let val: u64 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 7u8, val as u64) + self._bitfield_1.set_const::<0usize, 7u8>(val as u64) } } #[inline] @@ -852,8 +1198,10 @@ impl rte_mbuf__bindgen_ty_5__bindgen_ty_1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 7usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 7u8) - as u64, + >>::raw_get_const::< + 0usize, + 7u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u64, ) } } @@ -863,23 +1211,23 @@ impl rte_mbuf__bindgen_ty_5__bindgen_ty_1 { let val: u64 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 7usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 7u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn l3_len(&self) -> u64 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(7usize, 9u8) as u64) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<7usize, 9u8>() as u64) + } } #[inline] pub fn set_l3_len(&mut self, val: u64) { unsafe { let val: u64 = ::std::mem::transmute(val); - self._bitfield_1.set(7usize, 9u8, val as u64) + self._bitfield_1.set_const::<7usize, 9u8>(val as u64) } } #[inline] @@ -888,8 +1236,10 @@ impl rte_mbuf__bindgen_ty_5__bindgen_ty_1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 7usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 7usize, 9u8) - as u64, + >>::raw_get_const::< + 7usize, + 9u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u64, ) } } @@ -899,23 +1249,23 @@ impl rte_mbuf__bindgen_ty_5__bindgen_ty_1 { let val: u64 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 7usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 7usize, 9u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] 
pub fn l4_len(&self) -> u64 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(16usize, 8u8) as u64) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<16usize, 8u8>() as u64) + } } #[inline] pub fn set_l4_len(&mut self, val: u64) { unsafe { let val: u64 = ::std::mem::transmute(val); - self._bitfield_1.set(16usize, 8u8, val as u64) + self._bitfield_1.set_const::<16usize, 8u8>(val as u64) } } #[inline] @@ -924,8 +1274,10 @@ impl rte_mbuf__bindgen_ty_5__bindgen_ty_1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 7usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 16usize, 8u8) - as u64, + >>::raw_get_const::< + 16usize, + 8u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u64, ) } } @@ -935,23 +1287,23 @@ impl rte_mbuf__bindgen_ty_5__bindgen_ty_1 { let val: u64 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 7usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 16usize, 8u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn tso_segsz(&self) -> u64 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(24usize, 16u8) as u64) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<24usize, 16u8>() as u64) + } } #[inline] pub fn set_tso_segsz(&mut self, val: u64) { unsafe { let val: u64 = ::std::mem::transmute(val); - self._bitfield_1.set(24usize, 16u8, val as u64) + self._bitfield_1.set_const::<24usize, 16u8>(val as u64) } } #[inline] @@ -960,8 +1312,10 @@ impl rte_mbuf__bindgen_ty_5__bindgen_ty_1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 7usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 24usize, 16u8) - as u64, + >>::raw_get_const::< + 24usize, + 16u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u64, ) } } @@ -971,23 +1325,23 @@ impl rte_mbuf__bindgen_ty_5__bindgen_ty_1 { let val: u64 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 7usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 24usize, 16u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn outer_l3_len(&self) -> u64 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(40usize, 9u8) as u64) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<40usize, 9u8>() as u64) + } } #[inline] pub fn set_outer_l3_len(&mut self, val: u64) { unsafe { let val: u64 = ::std::mem::transmute(val); - self._bitfield_1.set(40usize, 9u8, val as u64) + self._bitfield_1.set_const::<40usize, 9u8>(val as u64) } } #[inline] @@ -996,8 +1350,10 @@ impl rte_mbuf__bindgen_ty_5__bindgen_ty_1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 7usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 40usize, 9u8) - as u64, + >>::raw_get_const::< + 40usize, + 9u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u64, ) } } @@ -1007,23 +1363,23 @@ impl rte_mbuf__bindgen_ty_5__bindgen_ty_1 { let val: u64 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 7usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 40usize, 9u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn outer_l2_len(&self) -> u64 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(49usize, 7u8) as u64) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<49usize, 7u8>() as u64) + } } #[inline] pub fn set_outer_l2_len(&mut self, val: u64) { unsafe { let val: u64 = 
::std::mem::transmute(val); - self._bitfield_1.set(49usize, 7u8, val as u64) + self._bitfield_1.set_const::<49usize, 7u8>(val as u64) } } #[inline] @@ -1032,8 +1388,10 @@ impl rte_mbuf__bindgen_ty_5__bindgen_ty_1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 7usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 49usize, 7u8) - as u64, + >>::raw_get_const::< + 49usize, + 7u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u64, ) } } @@ -1043,12 +1401,10 @@ impl rte_mbuf__bindgen_ty_5__bindgen_ty_1 { let val: u64 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 7usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 49usize, 7u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -1062,63 +1418,53 @@ impl rte_mbuf__bindgen_ty_5__bindgen_ty_1 { ) -> __BindgenBitfieldUnit<[u8; 7usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 7usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 7u8, - { - let l2_len: u64 = unsafe { ::std::mem::transmute(l2_len) }; - l2_len as u64 - }, - ); + >({ + let l2_len: u64 = unsafe { ::std::mem::transmute(l2_len) }; + l2_len as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 7usize, 9u8, - { - let l3_len: u64 = unsafe { ::std::mem::transmute(l3_len) }; - l3_len as u64 - }, - ); + >({ + let l3_len: u64 = unsafe { ::std::mem::transmute(l3_len) }; + l3_len as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 16usize, 8u8, - { - let l4_len: u64 = unsafe { ::std::mem::transmute(l4_len) }; - l4_len as u64 - }, - ); + >({ + let l4_len: u64 = unsafe { ::std::mem::transmute(l4_len) }; + l4_len as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 24usize, 16u8, - { - let tso_segsz: u64 = unsafe { ::std::mem::transmute(tso_segsz) }; - tso_segsz as u64 - }, - ); + >({ + let tso_segsz: u64 = unsafe { ::std::mem::transmute(tso_segsz) }; + tso_segsz as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 40usize, 9u8, - { - let outer_l3_len: u64 = unsafe { - ::std::mem::transmute(outer_l3_len) - }; - outer_l3_len as u64 - }, - ); + >({ + let outer_l3_len: u64 = unsafe { ::std::mem::transmute(outer_l3_len) }; + outer_l3_len as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 49usize, 7u8, - { - let outer_l2_len: u64 = unsafe { - ::std::mem::transmute(outer_l2_len) - }; - outer_l2_len as u64 - }, - ); + >({ + let outer_l2_len: u64 = unsafe { ::std::mem::transmute(outer_l2_len) }; + outer_l2_len as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/only_bitfields.rs b/bindgen-tests/tests/expectations/tests/only_bitfields.rs index 9a73fc2fee..6e25fd3a1e 100644 --- a/bindgen-tests/tests/expectations/tests/only_bitfields.rs +++ b/bindgen-tests/tests/expectations/tests/only_bitfields.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() 
as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << 
bit_shift
+        };
+        let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 };
+        for i in 0..bytes_needed {
+            let byte_val = (val >> (i * 8)) as u8;
+            let byte_mask = (field_mask >> (i * 8)) as u8;
+            let byte_ptr = unsafe { storage_ptr.add(start_byte + i) };
+            if cfg!(target_endian = "big") {
+                let byte = unsafe { (*byte_ptr).reverse_bits() };
+                let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                unsafe { *byte_ptr = new_byte.reverse_bits() };
             } else {
-                i
+                unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) };
+            }
+        }
+    }
+}
+/// Const-generic methods for efficient bitfield access when offset and width
+/// are known at compile time.
+impl<const N: usize> __BindgenBitfieldUnit<[u8; N]> {
+    /// Get a field using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub const fn get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&self) -> u64 {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return 0;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = 0usize;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val
+                        |= (self.storage[start_byte + i].reverse_bits() as usize)
+                            << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i] as usize) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val as u64
+        } else {
+            let mut val = 0u64;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val
+                        |= (self.storage[start_byte + i].reverse_bits() as u64)
+                            << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i] as u64) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val
+        }
+    }
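Because `get_const` is a `const fn` with const-generic offset and width, a field can in principle be read in a const context. A hedged usage sketch, assuming the `new` constructor that bindgen already emits for `__BindgenBitfieldUnit`:

```rust
// Byte 0 is 0b0000_0110, so the 2-bit field at bit offset 1 is 0b11 == 3.
const UNIT: __BindgenBitfieldUnit<[u8; 1usize]> =
    __BindgenBitfieldUnit::new([0b0000_0110]);
const FIELD: u64 = UNIT.get_const::<1usize, 2u8>();
// FIELD == 3; the old runtime get(1usize, 1u8) signature could not
// initialize a `const` item like this.
```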
+    /// Set a field using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub fn set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&mut self, val: u64) {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = val as usize;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift;
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                if cfg!(target_endian = "big") {
+                    let byte = self.storage[start_byte + i].reverse_bits();
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    self.storage[start_byte + i] = new_byte.reverse_bits();
+                } else {
+                    self.storage[start_byte + i] = (self.storage[start_byte + i]
+                        & !byte_mask) | (byte_val & byte_mask);
+                }
+                i += 1;
+            }
+        } else {
+            let mut val = val;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 {
+                !0u64 << bit_shift
+            } else {
+                ((1u64 << BIT_WIDTH) - 1) << bit_shift
             };
-            unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) };
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                if cfg!(target_endian = "big") {
+                    let byte = self.storage[start_byte + i].reverse_bits();
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    self.storage[start_byte + i] = new_byte.reverse_bits();
+                } else {
+                    self.storage[start_byte + i] = (self.storage[start_byte + i]
+                        & !byte_mask) | (byte_val & byte_mask);
+                }
+                i += 1;
+            }
+        }
+    }
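The setter above never writes bytes wholesale; it splices the new value in byte by byte so that neighbouring fields sharing a byte survive. A standalone sketch of that read-modify-write (little-endian path, assuming `bit_width < 64` so the mask expression cannot overflow):

```rust
fn insert_field(storage: &mut [u8], bit_offset: usize, bit_width: u8, val: u64) {
    let start_byte = bit_offset / 8;
    let bit_shift = bit_offset % 8;
    let bytes_needed = (bit_width as usize + bit_shift + 7) / 8;
    let field_mask = ((1u64 << bit_width) - 1) << bit_shift;
    // Position the new bits over the field and clamp any excess width.
    let val = (val << bit_shift) & field_mask;
    for i in 0..bytes_needed {
        let byte_val = (val >> (i * 8)) as u8;
        let byte_mask = (field_mask >> (i * 8)) as u8;
        // Clear only this field's bits in the byte, then OR in the new ones.
        storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) | byte_val;
    }
}
```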
+    /// Raw pointer get using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub const unsafe fn raw_get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        this: *const Self,
+    ) -> u64 {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return 0;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 };
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = 0usize;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte.reverse_bits() as usize) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte as usize) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val as u64
+        } else {
+            let mut val = 0u64;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte.reverse_bits() as u64) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte as u64) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val
+        }
+    }
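Note on the two arithmetic branches above: the `BIT_WIDTH as usize + bit_shift <= usize::BITS` test is what selects word-sized arithmetic over the u64 fallback. A small illustration of the condition:

```rust
// The shifted field must fit in one machine word for the usize
// arithmetic to be lossless; otherwise the u64 path is taken.
const fn fits_native_word(bit_width: u8, bit_shift: usize) -> bool {
    bit_width as usize + bit_shift <= usize::BITS as usize
}

// On a 32-bit target: a 28-bit field at bit offset 3 fits (28 + 3 <= 32)
// and uses usize; a 30-bit field at the same offset (30 + 3 > 32) does not.
```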
+ #[inline] + pub unsafe fn raw_set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::<u8>(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } @@ -158,13 +501,15 @@ const _: () = { impl C { #[inline] pub fn a(&self) -> bool { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u8) + } } #[inline] pub fn set_a(&mut self, val: bool) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -173,7 +518,10 @@ impl C { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) as u8, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -183,23 +531,23 @@ impl C { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b(&self) -> bool { - unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 7u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 7u8>() as u8) + } } #[inline] pub fn set_b(&mut self, val: bool) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(1usize, 7u8, val as
u64) } } #[inline] @@ -208,7 +556,10 @@ impl C { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 7u8) as u8, + >>::raw_get_const::< + 1usize, + 7u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -218,35 +569,31 @@ impl C { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 7u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn new_bitfield_1(a: bool, b: bool) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let a: u8 = unsafe { ::std::mem::transmute(a) }; - a as u64 - }, - ); + >({ + let a: u8 = unsafe { ::std::mem::transmute(a) }; + a as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 7u8, - { - let b: u8 = unsafe { ::std::mem::transmute(b) }; - b as u64 - }, - ); + >({ + let b: u8 = unsafe { ::std::mem::transmute(b) }; + b as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/packed-bitfield.rs b/bindgen-tests/tests/expectations/tests/packed-bitfield.rs index b5a734454a..a72a42bb11 100644 --- a/bindgen-tests/tests/expectations/tests/packed-bitfield.rs +++ b/bindgen-tests/tests/expectations/tests/packed-bitfield.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = 
val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
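The load path is the mirror image of the store: gather every byte the field touches into one word, shift the field down to bit 0, and mask off the neighbours. A little-endian sketch under the same single-word assumption; `get_field` is a hypothetical stand-in for the patch's `get`/`get_const`:

    fn get_field(storage: &[u8], bit_offset: usize, width: u8) -> u64 {
        let shift = bit_offset % 8;
        assert!(width as usize + shift <= 64); // sketch-only restriction
        if width == 0 {
            return 0;
        }
        let start_byte = bit_offset / 8;
        let bytes = (width as usize + shift + 7) / 8; // bytes touched, rounded up
        let mut val = 0u64;
        for i in 0..bytes {
            // Assemble the touched bytes little-endian into one word.
            val |= (storage[start_byte + i] as u64) << (i * 8);
        }
        val >>= shift; // drop low-order neighbour bits
        if width < 64 {
            val &= (1u64 << width) - 1; // drop high-order neighbour bits
        }
        val
    }

    fn main() {
        // An 8-bit field holding 0xAB at bit offset 4, across two bytes.
        let st = [0xB0u8, 0x0A];
        assert_eq!(get_field(&st, 4, 8), 0xAB);
    }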
+ #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift } else { - i + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + 
self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
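The raw `*const Self` getter above reads storage through `core::ptr::addr_of!`, so no reference to a possibly under-aligned field is ever materialised; that is what keeps these methods usable on `#[repr(packed)]` types. A sketch of just that access pattern; the `Packed` type and `read_storage_byte` are illustrative, not from the patch:

    use core::ptr::addr_of;

    #[repr(C, packed)]
    struct Packed {
        tag: u8,
        bits: [u8; 2], // stand-in for the bitfield storage
    }

    // `addr_of!` takes the field's address without creating an
    // intermediate reference, so its alignment is never asserted.
    unsafe fn read_storage_byte(this: *const Packed, i: usize) -> u8 {
        let storage_ptr = addr_of!((*this).bits) as *const u8;
        *storage_ptr.add(i)
    }

    fn main() {
        let p = Packed { tag: 1, bits: [0xB0, 0x0A] };
        let tag = p.tag; // copy out of the packed struct, no reference taken
        assert_eq!(tag, 1);
        unsafe {
            assert_eq!(read_storage_byte(&p, 0), 0xB0);
            assert_eq!(read_storage_byte(&p, 1), 0x0A);
        }
    }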
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } @@ -158,13 +501,15 @@ const _: () = { impl Date { #[inline] pub fn day(&self) -> ::std::os::raw::c_uchar { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 5u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 5u8>() as u8) + } } #[inline] pub fn set_day(&mut self, val: ::std::os::raw::c_uchar) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 5u8, val as u64) + self._bitfield_1.set_const::<0usize, 5u8>(val as u64) } } #[inline] @@ -173,7 +518,10 @@ impl Date { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 5u8) as u8, + >>::raw_get_const::< + 0usize, + 5u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -183,23 +531,23 @@ impl Date { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 5u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn month(&self) -> ::std::os::raw::c_uchar { - unsafe { ::std::mem::transmute(self._bitfield_1.get(5usize, 4u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<5usize, 4u8>() as u8) + } } #[inline] pub fn set_month(&mut self, val: ::std::os::raw::c_uchar) { unsafe { let val: u8 = ::std::mem::transmute(val); - 
self._bitfield_1.set(5usize, 4u8, val as u64) + self._bitfield_1.set_const::<5usize, 4u8>(val as u64) } } #[inline] @@ -208,7 +556,10 @@ impl Date { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 5usize, 4u8) as u8, + >>::raw_get_const::< + 5usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -218,23 +569,23 @@ impl Date { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 5usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn year(&self) -> ::std::os::raw::c_short { - unsafe { ::std::mem::transmute(self._bitfield_1.get(9usize, 15u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<9usize, 15u8>() as u16) + } } #[inline] pub fn set_year(&mut self, val: ::std::os::raw::c_short) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(9usize, 15u8, val as u64) + self._bitfield_1.set_const::<9usize, 15u8>(val as u64) } } #[inline] @@ -243,8 +594,10 @@ impl Date { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 9usize, 15u8) - as u16, + >>::raw_get_const::< + 9usize, + 15u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -254,12 +607,10 @@ impl Date { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 9usize, 15u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -270,32 +621,29 @@ impl Date { ) -> __BindgenBitfieldUnit<[u8; 3usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 3usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 5u8, - { - let day: u8 = unsafe { ::std::mem::transmute(day) }; - day as u64 - }, - ); + >({ + let day: u8 = unsafe { ::std::mem::transmute(day) }; + day as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 5usize, 4u8, - { - let month: u8 = unsafe { ::std::mem::transmute(month) }; - month as u64 - }, - ); + >({ + let month: u8 = unsafe { ::std::mem::transmute(month) }; + month as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 9usize, 15u8, - { - let year: u16 = unsafe { ::std::mem::transmute(year) }; - year as u64 - }, - ); + >({ + let year: u16 = unsafe { ::std::mem::transmute(year) }; + year as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/private_fields.rs b/bindgen-tests/tests/expectations/tests/private_fields.rs index abb2886d39..9139267ad4 100644 --- a/bindgen-tests/tests/expectations/tests/private_fields.rs +++ b/bindgen-tests/tests/expectations/tests/private_fields.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= 
(storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,17 +108,32 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << 
bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } } else { - i + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
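Both the getters and the setters branch on `BIT_WIDTH as usize + bit_shift <= usize::BITS as usize`, doing the arithmetic in `usize` for narrow fields and in `u64` otherwise; since only const parameters feed the comparison, the unused arm should fold away in each monomorphised copy. A condensed hypothetical sketch (`get_dispatch` and `load_le` are not in the patch; note the narrow arm here adds a full-width guard, since `(1usize << WIDTH) - 1` on its own is a shift overflow when `WIDTH` equals the word size):

    fn load_le(storage: &[u8], start: usize, n: usize) -> u64 {
        let mut v = 0u64;
        for i in 0..n {
            v |= (storage[start + i] as u64) << (i * 8);
        }
        v
    }

    fn get_dispatch<const OFFSET: usize, const WIDTH: u8>(storage: &[u8]) -> u64 {
        let (start, shift) = (OFFSET / 8, OFFSET % 8);
        assert!(WIDTH as usize + shift <= 64); // sketch-only restriction
        let bytes = (WIDTH as usize + shift + 7) / 8;
        // Const operands: the branch is decided per monomorphisation.
        if WIDTH as usize + shift <= usize::BITS as usize {
            // Narrow field: native-word arithmetic, cheaper on 32-bit targets.
            let v = (load_le(storage, start, bytes) as usize) >> shift;
            let m = if (WIDTH as u32) < usize::BITS {
                (1usize << WIDTH) - 1
            } else {
                usize::MAX // full-width guard
            };
            (v & m) as u64
        } else {
            // Wide field: fall back to u64 arithmetic.
            let v = load_le(storage, start, bytes) >> shift;
            if WIDTH < 64 { v & ((1u64 << WIDTH) - 1) } else { v }
        }
    }

    fn main() {
        assert_eq!(get_dispatch::<4, 8>(&[0xB0, 0x0A]), 0xAB);
    }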
+ #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
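On big-endian targets every path first applies `u8::reverse_bits` to each storage byte, turning MSB-first bit numbering into the LSB-first layout the shift-and-mask arithmetic assumes, and a final `reverse_bits` on the assembled word undoes it. A self-contained sketch of that transform on the load side; `get_field_be` is a hypothetical name, and the patch selects this behaviour with `cfg!(target_endian = "big")` rather than unconditionally:

    fn get_field_be(storage: &[u8], bit_offset: usize, width: u8) -> u64 {
        let shift = bit_offset % 8;
        assert!(width > 0 && width as usize + shift <= 64); // sketch only
        let start = bit_offset / 8;
        let bytes = (width as usize + shift + 7) / 8;
        let mut val = 0u64;
        for i in 0..bytes {
            // Per-byte reversal: MSB-first bit 0 becomes LSB bit 0, so the
            // little-endian shift/mask arithmetic below applies as-is.
            val |= (storage[start + i].reverse_bits() as u64) << (i * 8);
        }
        val >>= shift;
        if width < 64 {
            val &= (1u64 << width) - 1;
        }
        // The extracted bits are still reversed: flip the whole word back
        // and right-align the width-bit result.
        val.reverse_bits() >> (64 - width as usize)
    }

    fn main() {
        // MSB-first: the first 4 bits of 0b1010_0000 form the field 0b1010.
        assert_eq!(get_field_be(&[0b1010_0000], 0, 4), 0b1010);
    }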
+ #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } @@ -175,13 +518,15 @@ const _: () = { impl PrivateBitFields { #[inline] fn a(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 4u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 4u8>() as u32) + } } #[inline] fn set_a(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 4u8, val as u64) + self._bitfield_1.set_const::<0usize, 4u8>(val as u64) } } #[inline] @@ -190,8 +535,10 @@ impl PrivateBitFields { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 4u8) - as u32, + >>::raw_get_const::< + 0usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -201,23 +548,23 @@ impl PrivateBitFields { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] fn b(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(4usize, 4u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<4usize, 4u8>() as u32) + } } #[inline] fn set_b(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = 
::std::mem::transmute(val); - self._bitfield_1.set(4usize, 4u8, val as u64) + self._bitfield_1.set_const::<4usize, 4u8>(val as u64) } } #[inline] @@ -226,8 +573,10 @@ impl PrivateBitFields { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 4usize, 4u8) - as u32, + >>::raw_get_const::< + 4usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -237,12 +586,10 @@ impl PrivateBitFields { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 4usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -252,23 +599,21 @@ impl PrivateBitFields { ) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 4u8, - { - let a: u32 = unsafe { ::std::mem::transmute(a) }; - a as u64 - }, - ); + >({ + let a: u32 = unsafe { ::std::mem::transmute(a) }; + a as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 4usize, 4u8, - { - let b: u32 = unsafe { ::std::mem::transmute(b) }; - b as u64 - }, - ); + >({ + let b: u32 = unsafe { ::std::mem::transmute(b) }; + b as u64 + }); __bindgen_bitfield_unit } } @@ -287,13 +632,15 @@ const _: () = { impl PublicBitFields { #[inline] pub fn a(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 4u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 4u8>() as u32) + } } #[inline] pub fn set_a(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 4u8, val as u64) + self._bitfield_1.set_const::<0usize, 4u8>(val as u64) } } #[inline] @@ -302,8 +649,10 @@ impl PublicBitFields { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 4u8) - as u32, + >>::raw_get_const::< + 0usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -313,23 +662,23 @@ impl PublicBitFields { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(4usize, 4u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<4usize, 4u8>() as u32) + } } #[inline] pub fn set_b(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(4usize, 4u8, val as u64) + self._bitfield_1.set_const::<4usize, 4u8>(val as u64) } } #[inline] @@ -338,8 +687,10 @@ impl PublicBitFields { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 4usize, 4u8) - as u32, + >>::raw_get_const::< + 4usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -349,12 +700,10 @@ impl PublicBitFields { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 4usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as 
u64) } } #[inline] @@ -364,23 +713,21 @@ impl PublicBitFields { ) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 4u8, - { - let a: u32 = unsafe { ::std::mem::transmute(a) }; - a as u64 - }, - ); + >({ + let a: u32 = unsafe { ::std::mem::transmute(a) }; + a as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 4usize, 4u8, - { - let b: u32 = unsafe { ::std::mem::transmute(b) }; - b as u64 - }, - ); + >({ + let b: u32 = unsafe { ::std::mem::transmute(b) }; + b as u64 + }); __bindgen_bitfield_unit } } @@ -399,13 +746,15 @@ const _: () = { impl MixedBitFields { #[inline] fn a(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 4u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 4u8>() as u32) + } } #[inline] fn set_a(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 4u8, val as u64) + self._bitfield_1.set_const::<0usize, 4u8>(val as u64) } } #[inline] @@ -414,8 +763,10 @@ impl MixedBitFields { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 4u8) - as u32, + >>::raw_get_const::< + 0usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -425,23 +776,23 @@ impl MixedBitFields { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn d(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(4usize, 4u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<4usize, 4u8>() as u32) + } } #[inline] pub fn set_d(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(4usize, 4u8, val as u64) + self._bitfield_1.set_const::<4usize, 4u8>(val as u64) } } #[inline] @@ -450,8 +801,10 @@ impl MixedBitFields { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 4usize, 4u8) - as u32, + >>::raw_get_const::< + 4usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -461,12 +814,10 @@ impl MixedBitFields { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 4usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -476,23 +827,21 @@ impl MixedBitFields { ) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 4u8, - { - let a: u32 = unsafe { ::std::mem::transmute(a) }; - a as u64 - }, - ); + >({ + let a: u32 = unsafe { ::std::mem::transmute(a) }; + a as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 4usize, 4u8, - { - let d: u32 = unsafe { ::std::mem::transmute(d) }; - d as u64 - }, - ); + >({ + let d: u32 = unsafe { ::std::mem::transmute(d) }; + d as u64 + }); __bindgen_bitfield_unit } } @@ -641,13 +990,15 @@ const _: () = { impl Override { #[inline] pub fn bf_a(&self) -> 
::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 4u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 4u8>() as u32) + } } #[inline] pub fn set_bf_a(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 4u8, val as u64) + self._bitfield_1.set_const::<0usize, 4u8>(val as u64) } } #[inline] @@ -656,8 +1007,10 @@ impl Override { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 4u8) - as u32, + >>::raw_get_const::< + 0usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -667,23 +1020,23 @@ impl Override { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] fn bf_b(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(4usize, 4u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<4usize, 4u8>() as u32) + } } #[inline] fn set_bf_b(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(4usize, 4u8, val as u64) + self._bitfield_1.set_const::<4usize, 4u8>(val as u64) } } #[inline] @@ -692,8 +1045,10 @@ impl Override { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 4usize, 4u8) - as u32, + >>::raw_get_const::< + 4usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -703,23 +1058,23 @@ impl Override { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 4usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] fn private_bf_c(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(8usize, 4u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<8usize, 4u8>() as u32) + } } #[inline] fn set_private_bf_c(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(8usize, 4u8, val as u64) + self._bitfield_1.set_const::<8usize, 4u8>(val as u64) } } #[inline] @@ -728,8 +1083,10 @@ impl Override { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 8usize, 4u8) - as u32, + >>::raw_get_const::< + 8usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -739,12 +1096,10 @@ impl Override { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 8usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -755,34 +1110,29 @@ impl Override { ) -> __BindgenBitfieldUnit<[u8; 2usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 2usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 4u8, - { - let bf_a: u32 = unsafe { ::std::mem::transmute(bf_a) }; - bf_a as u64 - }, - ); + >({ + let bf_a: u32 = unsafe { ::std::mem::transmute(bf_a) }; + bf_a as u64 + }); 
__bindgen_bitfield_unit - .set( + .set_const::< 4usize, 4u8, - { - let bf_b: u32 = unsafe { ::std::mem::transmute(bf_b) }; - bf_b as u64 - }, - ); + >({ + let bf_b: u32 = unsafe { ::std::mem::transmute(bf_b) }; + bf_b as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 8usize, 4u8, - { - let private_bf_c: u32 = unsafe { - ::std::mem::transmute(private_bf_c) - }; - private_bf_c as u64 - }, - ); + >({ + let private_bf_c: u32 = unsafe { ::std::mem::transmute(private_bf_c) }; + private_bf_c as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/redundant-packed-and-align.rs b/bindgen-tests/tests/expectations/tests/redundant-packed-and-align.rs index 05401e52ca..77f9531f47 100644 --- a/bindgen-tests/tests/expectations/tests/redundant-packed-and-align.rs +++ b/bindgen-tests/tests/expectations/tests/redundant-packed-and-align.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset 
/ 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
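For orientation, a hypothetical call site exercising the const-generic unit directly, assuming the `__BindgenBitfieldUnit` definition from this patch is in scope; the offsets and widths mirror `Date`'s `day` and `month` fields from packed-bitfield.rs above:

    fn demo() {
        let mut unit: __BindgenBitfieldUnit<[u8; 3usize]> = Default::default();
        unit.set_const::<0usize, 5u8>(31); // day: 5 bits at bit 0
        unit.set_const::<5usize, 4u8>(12); // month: 4 bits at bit 5
        assert_eq!(unit.get_const::<0usize, 5u8>(), 31);
        assert_eq!(unit.get_const::<5usize, 4u8>(), 12);
    }

Each offset/width pair becomes a const argument at the call site, so every accessor monomorphises into straight-line byte loads and stores instead of the former per-bit loop.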
+ #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + 
self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
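The `field_mask` expression saturates once `BIT_WIDTH` plus `bit_shift` reaches 64: the plain form breaks down there (`1u64 << 64` alone is a shift overflow when `BIT_WIDTH` is 64), so the mask falls back to `!0u64 << bit_shift`, every bit from the shift position upward. A small sketch with worked cases; `field_mask` as a free function is illustrative only:

    fn field_mask(width: u8, shift: usize) -> u64 {
        if width as usize + shift >= 64 {
            !0u64 << shift // saturate: all bits from `shift` upward
        } else {
            ((1u64 << width) - 1) << shift
        }
    }

    fn main() {
        assert_eq!(field_mask(4, 4), 0xF0); // ordinary case
        assert_eq!(field_mask(64, 0), u64::MAX); // plain form would overflow
        assert_eq!(field_mask(62, 3), !0u64 << 3); // bits 3..=63
    }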
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } @@ -191,13 +534,15 @@ const _: () = { impl redundant_packed_bitfield { #[inline] pub fn b0(&self) -> u8 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u8) + } } #[inline] pub fn set_b0(&mut self, val: u8) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -206,7 +551,10 @@ impl redundant_packed_bitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) as u8, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -216,23 +564,23 @@ impl redundant_packed_bitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b1(&self) -> u8 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 1u8>() as u8) + } } #[inline] pub fn set_b1(&mut self, val: u8) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(1usize, 
1u8, val as u64) + self._bitfield_1.set_const::<1usize, 1u8>(val as u64) } } #[inline] @@ -241,7 +589,10 @@ impl redundant_packed_bitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 1u8) as u8, + >>::raw_get_const::< + 1usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -251,35 +602,31 @@ impl redundant_packed_bitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn new_bitfield_1(b0: u8, b1: u8) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let b0: u8 = unsafe { ::std::mem::transmute(b0) }; - b0 as u64 - }, - ); + >({ + let b0: u8 = unsafe { ::std::mem::transmute(b0) }; + b0 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 1u8, - { - let b1: u8 = unsafe { ::std::mem::transmute(b1) }; - b1 as u64 - }, - ); + >({ + let b1: u8 = unsafe { ::std::mem::transmute(b1) }; + b1 as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/struct_with_bitfields.rs b/bindgen-tests/tests/expectations/tests/struct_with_bitfields.rs index a294c871d3..20c85ccfc9 100644 --- a/bindgen-tests/tests/expectations/tests/struct_with_bitfields.rs +++ b/bindgen-tests/tests/expectations/tests/struct_with_bitfields.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,17 +108,32 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as 
u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | 
(byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift } else { - i + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } @@ -161,13 +504,15 @@ const _: () = { impl bitfield { #[inline] pub fn a(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u16) + } } #[inline] pub fn set_a(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -176,8 +521,10 @@ impl bitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) - as u16, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -187,23 +534,23 @@ impl bitfield { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 1u8>() as u16) + } } #[inline] pub fn set_b(&mut self, 
val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(1usize, 1u8, val as u64) + self._bitfield_1.set_const::<1usize, 1u8>(val as u64) } } #[inline] @@ -212,8 +559,10 @@ impl bitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 1u8) - as u16, + >>::raw_get_const::< + 1usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -223,23 +572,23 @@ impl bitfield { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn c(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<2usize, 1u8>() as u16) + } } #[inline] pub fn set_c(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(2usize, 1u8, val as u64) + self._bitfield_1.set_const::<2usize, 1u8>(val as u64) } } #[inline] @@ -248,8 +597,10 @@ impl bitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 2usize, 1u8) - as u16, + >>::raw_get_const::< + 2usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -259,23 +610,23 @@ impl bitfield { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 2usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn d(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(6usize, 2u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<6usize, 2u8>() as u16) + } } #[inline] pub fn set_d(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(6usize, 2u8, val as u64) + self._bitfield_1.set_const::<6usize, 2u8>(val as u64) } } #[inline] @@ -284,8 +635,10 @@ impl bitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 6usize, 2u8) - as u16, + >>::raw_get_const::< + 6usize, + 2u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -295,12 +648,10 @@ impl bitfield { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 6usize, 2u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -312,52 +663,50 @@ impl bitfield { ) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let a: u16 = unsafe { ::std::mem::transmute(a) }; - a as u64 - }, - ); + >({ + let a: u16 = unsafe { ::std::mem::transmute(a) }; + a as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 1u8, - { - let b: u16 = unsafe { ::std::mem::transmute(b) }; - b as u64 - }, - ); + >({ + let b: u16 = unsafe { ::std::mem::transmute(b) }; + b as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 2usize, 1u8, - { - 
let c: u16 = unsafe { ::std::mem::transmute(c) }; - c as u64 - }, - ); + >({ + let c: u16 = unsafe { ::std::mem::transmute(c) }; + c as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 6usize, 2u8, - { - let d: u16 = unsafe { ::std::mem::transmute(d) }; - d as u64 - }, - ); + >({ + let d: u16 = unsafe { ::std::mem::transmute(d) }; + d as u64 + }); __bindgen_bitfield_unit } #[inline] pub fn f(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_2.get(0usize, 2u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_2.get_const::<0usize, 2u8>() as u32) + } } #[inline] pub fn set_f(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_2.set(0usize, 2u8, val as u64) + self._bitfield_2.set_const::<0usize, 2u8>(val as u64) } } #[inline] @@ -366,8 +715,10 @@ impl bitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 8usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_2), 0usize, 2u8) - as u32, + >>::raw_get_const::< + 0usize, + 2u8, + >(::std::ptr::addr_of!((*this)._bitfield_2)) as u32, ) } } @@ -377,23 +728,23 @@ impl bitfield { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 8usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_2), + >>::raw_set_const::< 0usize, 2u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_2), val as u64) } } #[inline] pub fn g(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_2.get(32usize, 32u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_2.get_const::<32usize, 32u8>() as u32) + } } #[inline] pub fn set_g(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_2.set(32usize, 32u8, val as u64) + self._bitfield_2.set_const::<32usize, 32u8>(val as u64) } } #[inline] @@ -402,8 +753,10 @@ impl bitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 8usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_2), 32usize, 32u8) - as u32, + >>::raw_get_const::< + 32usize, + 32u8, + >(::std::ptr::addr_of!((*this)._bitfield_2)) as u32, ) } } @@ -413,12 +766,10 @@ impl bitfield { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 8usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_2), + >>::raw_set_const::< 32usize, 32u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_2), val as u64) } } #[inline] @@ -428,23 +779,21 @@ impl bitfield { ) -> __BindgenBitfieldUnit<[u8; 8usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 2u8, - { - let f: u32 = unsafe { ::std::mem::transmute(f) }; - f as u64 - }, - ); + >({ + let f: u32 = unsafe { ::std::mem::transmute(f) }; + f as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 32usize, 32u8, - { - let g: u32 = unsafe { ::std::mem::transmute(g) }; - g as u64 - }, - ); + >({ + let g: u32 = unsafe { ::std::mem::transmute(g) }; + g as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/timex.rs b/bindgen-tests/tests/expectations/tests/timex.rs index f73b608de2..7e237c67bf 100644 --- a/bindgen-tests/tests/expectations/tests/timex.rs +++ b/bindgen-tests/tests/expectations/tests/timex.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if 
self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - 
i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } } else { - i + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } @@ -192,13 +535,15 @@ impl Default for timex_named { impl timex_named { #[inline] pub fn a(&self) -> ::std::os::raw::c_int { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 32u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 32u8>() as u32) + } } #[inline] pub fn set_a(&mut self, val: ::std::os::raw::c_int) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 32u8, val as u64) + self._bitfield_1.set_const::<0usize, 32u8>(val as u64) } } #[inline] @@ -207,8 +552,10 @@ impl timex_named { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 32u8) - as u32, + >>::raw_get_const::< + 0usize, + 32u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -218,23 +565,23 @@ impl timex_named { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 32u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b(&self) -> ::std::os::raw::c_int { - unsafe { ::std::mem::transmute(self._bitfield_1.get(32usize, 32u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<32usize, 32u8>() as u32) + } } 
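// --- Editorial sketch, not generated output: how the const-generic
// accessors above are exercised at a call site. The `new([0u8; N])`
// constructor and the 9-bit field at bit offset 3 are assumptions made
// for illustration only; they are not part of this diff.
#[allow(dead_code)]
fn __editorial_get_set_const_sketch() {
    let mut unit = __BindgenBitfieldUnit::new([0u8; 2]);
    // With BIT_OFFSET = 3 and BIT_WIDTH = 9 monomorphized, start_byte,
    // bit_shift and the field mask all fold to compile-time constants.
    unit.set_const::<3usize, 9u8>(0x1AB); // input is masked to 9 bits
    assert_eq!(unit.get_const::<3usize, 9u8>(), 0x1AB);
}
// ---------------------------------------------------------------------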
#[inline] pub fn set_b(&mut self, val: ::std::os::raw::c_int) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(32usize, 32u8, val as u64) + self._bitfield_1.set_const::<32usize, 32u8>(val as u64) } } #[inline] @@ -243,8 +590,10 @@ impl timex_named { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 32usize, 32u8) - as u32, + >>::raw_get_const::< + 32usize, + 32u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -254,23 +603,23 @@ impl timex_named { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 32usize, 32u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn c(&self) -> ::std::os::raw::c_int { - unsafe { ::std::mem::transmute(self._bitfield_1.get(64usize, 32u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<64usize, 32u8>() as u32) + } } #[inline] pub fn set_c(&mut self, val: ::std::os::raw::c_int) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(64usize, 32u8, val as u64) + self._bitfield_1.set_const::<64usize, 32u8>(val as u64) } } #[inline] @@ -279,8 +628,10 @@ impl timex_named { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 64usize, 32u8) - as u32, + >>::raw_get_const::< + 64usize, + 32u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -290,23 +641,23 @@ impl timex_named { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 64usize, 32u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn d(&self) -> ::std::os::raw::c_int { - unsafe { ::std::mem::transmute(self._bitfield_1.get(96usize, 32u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<96usize, 32u8>() as u32) + } } #[inline] pub fn set_d(&mut self, val: ::std::os::raw::c_int) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(96usize, 32u8, val as u64) + self._bitfield_1.set_const::<96usize, 32u8>(val as u64) } } #[inline] @@ -315,8 +666,10 @@ impl timex_named { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 96usize, 32u8) - as u32, + >>::raw_get_const::< + 96usize, + 32u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -326,23 +679,23 @@ impl timex_named { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 96usize, 32u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn e(&self) -> ::std::os::raw::c_int { - unsafe { ::std::mem::transmute(self._bitfield_1.get(128usize, 32u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<128usize, 32u8>() as u32) + } } #[inline] pub fn set_e(&mut self, val: ::std::os::raw::c_int) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(128usize, 32u8, val as u64) + self._bitfield_1.set_const::<128usize, 32u8>(val as u64) } } #[inline] @@ -351,8 +704,10 @@ impl timex_named { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 44usize], - 
>>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 128usize, 32u8) - as u32, + >>::raw_get_const::< + 128usize, + 32u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -362,23 +717,23 @@ impl timex_named { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 128usize, 32u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn f(&self) -> ::std::os::raw::c_int { - unsafe { ::std::mem::transmute(self._bitfield_1.get(160usize, 32u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<160usize, 32u8>() as u32) + } } #[inline] pub fn set_f(&mut self, val: ::std::os::raw::c_int) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(160usize, 32u8, val as u64) + self._bitfield_1.set_const::<160usize, 32u8>(val as u64) } } #[inline] @@ -387,8 +742,10 @@ impl timex_named { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 160usize, 32u8) - as u32, + >>::raw_get_const::< + 160usize, + 32u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -398,23 +755,23 @@ impl timex_named { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 160usize, 32u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn g(&self) -> ::std::os::raw::c_int { - unsafe { ::std::mem::transmute(self._bitfield_1.get(192usize, 32u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<192usize, 32u8>() as u32) + } } #[inline] pub fn set_g(&mut self, val: ::std::os::raw::c_int) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(192usize, 32u8, val as u64) + self._bitfield_1.set_const::<192usize, 32u8>(val as u64) } } #[inline] @@ -423,8 +780,10 @@ impl timex_named { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 192usize, 32u8) - as u32, + >>::raw_get_const::< + 192usize, + 32u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -434,23 +793,23 @@ impl timex_named { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 192usize, 32u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn h(&self) -> ::std::os::raw::c_int { - unsafe { ::std::mem::transmute(self._bitfield_1.get(224usize, 32u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<224usize, 32u8>() as u32) + } } #[inline] pub fn set_h(&mut self, val: ::std::os::raw::c_int) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(224usize, 32u8, val as u64) + self._bitfield_1.set_const::<224usize, 32u8>(val as u64) } } #[inline] @@ -459,8 +818,10 @@ impl timex_named { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 224usize, 32u8) - as u32, + >>::raw_get_const::< + 224usize, + 32u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -470,23 +831,23 @@ impl timex_named { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_set( - 
::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 224usize, 32u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn i(&self) -> ::std::os::raw::c_int { - unsafe { ::std::mem::transmute(self._bitfield_1.get(256usize, 32u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<256usize, 32u8>() as u32) + } } #[inline] pub fn set_i(&mut self, val: ::std::os::raw::c_int) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(256usize, 32u8, val as u64) + self._bitfield_1.set_const::<256usize, 32u8>(val as u64) } } #[inline] @@ -495,8 +856,10 @@ impl timex_named { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 256usize, 32u8) - as u32, + >>::raw_get_const::< + 256usize, + 32u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -506,23 +869,23 @@ impl timex_named { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 256usize, 32u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn j(&self) -> ::std::os::raw::c_int { - unsafe { ::std::mem::transmute(self._bitfield_1.get(288usize, 32u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<288usize, 32u8>() as u32) + } } #[inline] pub fn set_j(&mut self, val: ::std::os::raw::c_int) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(288usize, 32u8, val as u64) + self._bitfield_1.set_const::<288usize, 32u8>(val as u64) } } #[inline] @@ -531,8 +894,10 @@ impl timex_named { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 288usize, 32u8) - as u32, + >>::raw_get_const::< + 288usize, + 32u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -542,23 +907,23 @@ impl timex_named { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 288usize, 32u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn k(&self) -> ::std::os::raw::c_int { - unsafe { ::std::mem::transmute(self._bitfield_1.get(320usize, 32u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<320usize, 32u8>() as u32) + } } #[inline] pub fn set_k(&mut self, val: ::std::os::raw::c_int) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(320usize, 32u8, val as u64) + self._bitfield_1.set_const::<320usize, 32u8>(val as u64) } } #[inline] @@ -567,8 +932,10 @@ impl timex_named { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 320usize, 32u8) - as u32, + >>::raw_get_const::< + 320usize, + 32u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -578,12 +945,10 @@ impl timex_named { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 320usize, 32u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } } diff --git a/bindgen-tests/tests/expectations/tests/union_bitfield.rs b/bindgen-tests/tests/expectations/tests/union_bitfield.rs index 8df0724738..d9af24c920 100644 --- 
a/bindgen-tests/tests/expectations/tests/union_bitfield.rs +++ b/bindgen-tests/tests/expectations/tests/union_bitfield.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } 
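// --- Editorial sketch, not generated output: why the big-endian branch
// above bit-reverses each byte. On MSB-first targets the unit's bit 0 is
// the *high* bit of byte 0; reversing every byte maps the layout onto the
// LSB-first shift/mask arithmetic shared with little-endian, and the
// final `val.reverse_bits() >> (64 - bit_width)` maps the result back.
#[allow(dead_code)]
fn __editorial_endian_sketch() {
    // Bit 0 of an MSB-first byte lands in bit 0 of an LSB-first byte:
    assert_eq!(0b1000_0000u8.reverse_bits(), 0b0000_0001u8);
}
// ----------------------------------------------------------------------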
#[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift } else { - i + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
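Review note: the raw_* variants mirror get_const/set_const but take the unit through a raw pointer, so callers can reach a bitfield without first materializing a reference; a hedged sketch (layout assumed for illustration):

    let mut unit = __BindgenBitfieldUnit::<[u8; 1usize]>::new([0u8; 1usize]);
    let p = ::std::ptr::addr_of_mut!(unit);
    unsafe { __BindgenBitfieldUnit::<[u8; 1usize]>::raw_set_const::<0usize, 7u8>(p, 0x55) };
    assert_eq!(
        unsafe { __BindgenBitfieldUnit::<[u8; 1usize]>::raw_get_const::<0usize, 7u8>(p) },
        0x55,
    );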
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } @@ -168,13 +511,15 @@ impl Default for U4 { impl U4 { #[inline] pub fn derp(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u32) + } } #[inline] pub fn set_derp(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -183,8 +528,10 @@ impl U4 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) - as u32, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -194,12 +541,10 @@ impl U4 { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -208,14 +553,13 @@ impl U4 { ) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let derp: u32 = unsafe { ::std::mem::transmute(derp) }; - derp as u64 - }, - ); + >({ + let derp: 
u32 = unsafe { ::std::mem::transmute(derp) }; + derp as u64 + }); __bindgen_bitfield_unit } } @@ -242,13 +586,15 @@ impl Default for B { impl B { #[inline] pub fn foo(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 31u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 31u8>() as u32) + } } #[inline] pub fn set_foo(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 31u8, val as u64) + self._bitfield_1.set_const::<0usize, 31u8>(val as u64) } } #[inline] @@ -257,8 +603,10 @@ impl B { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 31u8) - as u32, + >>::raw_get_const::< + 0usize, + 31u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -268,23 +616,23 @@ impl B { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 31u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bar(&self) -> ::std::os::raw::c_uchar { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u8) + } } #[inline] pub fn set_bar(&mut self, val: ::std::os::raw::c_uchar) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -293,7 +641,10 @@ impl B { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) as u8, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -303,12 +654,10 @@ impl B { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -318,23 +667,21 @@ impl B { ) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 31u8, - { - let foo: u32 = unsafe { ::std::mem::transmute(foo) }; - foo as u64 - }, - ); + >({ + let foo: u32 = unsafe { ::std::mem::transmute(foo) }; + foo as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let bar: u8 = unsafe { ::std::mem::transmute(bar) }; - bar as u64 - }, - ); + >({ + let bar: u8 = unsafe { ::std::mem::transmute(bar) }; + bar as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/union_with_anon_struct_bitfield.rs b/bindgen-tests/tests/expectations/tests/union_with_anon_struct_bitfield.rs index a1b61c035d..b757df6f4a 100644 --- a/bindgen-tests/tests/expectations/tests/union_with_anon_struct_bitfield.rs +++ b/bindgen-tests/tests/expectations/tests/union_with_anon_struct_bitfield.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if 
bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { 
+ val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
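Review note: for this file's `c` accessors further down (set_const::<7usize, 25u8>), the arithmetic works out to start_byte = 7 / 8 = 0, bit_shift = 7, and bytes_needed = (25 + 7 + 7) / 8 = 4, so a single straddling write touches all four storage bytes while the per-byte masks leave bits 0..=6 (field `b`) untouched.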
+ #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
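Review note: the big-endian branches normalize bit order with reverse_bits; for example, a 3-bit value 0b110 becomes val.reverse_bits() >> (64 - 3) == 0b011, i.e. the field is mirrored within its own width before the byte-wise store, matching the old MSB-first indexing.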
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } @@ -167,13 +510,15 @@ const _: () = { impl foo__bindgen_ty_1 { #[inline] pub fn b(&self) -> ::std::os::raw::c_int { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 7u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 7u8>() as u32) + } } #[inline] pub fn set_b(&mut self, val: ::std::os::raw::c_int) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 7u8, val as u64) + self._bitfield_1.set_const::<0usize, 7u8>(val as u64) } } #[inline] @@ -182,8 +527,10 @@ impl foo__bindgen_ty_1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 7u8) - as u32, + >>::raw_get_const::< + 0usize, + 7u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -193,23 +540,23 @@ impl foo__bindgen_ty_1 { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 7u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn c(&self) -> ::std::os::raw::c_int { - unsafe { ::std::mem::transmute(self._bitfield_1.get(7usize, 25u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<7usize, 25u8>() as u32) + } } #[inline] pub fn set_c(&mut self, val: ::std::os::raw::c_int) { unsafe { let val: u32 = 
::std::mem::transmute(val); - self._bitfield_1.set(7usize, 25u8, val as u64) + self._bitfield_1.set_const::<7usize, 25u8>(val as u64) } } #[inline] @@ -218,8 +565,10 @@ impl foo__bindgen_ty_1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 7usize, 25u8) - as u32, + >>::raw_get_const::< + 7usize, + 25u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -229,12 +578,10 @@ impl foo__bindgen_ty_1 { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 7usize, 25u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -244,23 +591,21 @@ impl foo__bindgen_ty_1 { ) -> __BindgenBitfieldUnit<[u8; 4usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 7u8, - { - let b: u32 = unsafe { ::std::mem::transmute(b) }; - b as u64 - }, - ); + >({ + let b: u32 = unsafe { ::std::mem::transmute(b) }; + b as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 7usize, 25u8, - { - let c: u32 = unsafe { ::std::mem::transmute(c) }; - c as u64 - }, - ); + >({ + let c: u32 = unsafe { ::std::mem::transmute(c) }; + c as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/weird_bitfields.rs b/bindgen-tests/tests/expectations/tests/weird_bitfields.rs index ca8d84520b..7a9269bbef 100644 --- a/bindgen-tests/tests/expectations/tests/weird_bitfields.rs +++ b/bindgen-tests/tests/expectations/tests/weird_bitfields.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,17 +108,32 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + let byte = unsafe { 
*storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,308 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } + } + } +} +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
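Review note: for this file's bitTest2 accessors (offset 16, width 15), start_byte = 2, bit_shift = 0, and bytes_needed = (15 + 0 + 7) / 8 = 2, so get_const::<16usize, 15u8> can compile down to two byte loads from storage[2] and storage[3] plus a 15-bit mask.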
+ #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | 
(byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } } else { - i + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } } } } @@ -224,13 +567,15 @@ impl Default for Weird { impl Weird { #[inline] pub fn bitTest(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 16u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 16u8>() as u32) + } } #[inline] pub fn set_bitTest(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 16u8, val as u64) + self._bitfield_1.set_const::<0usize, 16u8>(val as u64) } } #[inline] @@ -239,8 +584,10 @@ impl Weird { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 16u8) - as u32, + >>::raw_get_const::< + 0usize, + 16u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -250,23 +597,23 @@ impl Weird { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 16u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bitTest2(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(16usize, 15u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<16usize, 15u8>() as u32) + } } #[inline] 
pub fn set_bitTest2(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(16usize, 15u8, val as u64) + self._bitfield_1.set_const::<16usize, 15u8>(val as u64) } } #[inline] @@ -275,8 +622,10 @@ impl Weird { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 16usize, 15u8) - as u32, + >>::raw_get_const::< + 16usize, + 15u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -286,12 +635,10 @@ impl Weird { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 16usize, 15u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -301,34 +648,34 @@ impl Weird { ) -> __BindgenBitfieldUnit<[u8; 4usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 16u8, - { - let bitTest: u32 = unsafe { ::std::mem::transmute(bitTest) }; - bitTest as u64 - }, - ); + >({ + let bitTest: u32 = unsafe { ::std::mem::transmute(bitTest) }; + bitTest as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 16usize, 15u8, - { - let bitTest2: u32 = unsafe { ::std::mem::transmute(bitTest2) }; - bitTest2 as u64 - }, - ); + >({ + let bitTest2: u32 = unsafe { ::std::mem::transmute(bitTest2) }; + bitTest2 as u64 + }); __bindgen_bitfield_unit } #[inline] pub fn mFillOpacitySource(&self) -> nsStyleSVGOpacitySource { - unsafe { ::std::mem::transmute(self._bitfield_2.get(0usize, 3u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_2.get_const::<0usize, 3u8>() as u32) + } } #[inline] pub fn set_mFillOpacitySource(&mut self, val: nsStyleSVGOpacitySource) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_2.set(0usize, 3u8, val as u64) + self._bitfield_2.set_const::<0usize, 3u8>(val as u64) } } #[inline] @@ -337,8 +684,10 @@ impl Weird { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_2), 0usize, 3u8) - as u32, + >>::raw_get_const::< + 0usize, + 3u8, + >(::std::ptr::addr_of!((*this)._bitfield_2)) as u32, ) } } @@ -351,23 +700,23 @@ impl Weird { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_2), + >>::raw_set_const::< 0usize, 3u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_2), val as u64) } } #[inline] pub fn mStrokeOpacitySource(&self) -> nsStyleSVGOpacitySource { - unsafe { ::std::mem::transmute(self._bitfield_2.get(3usize, 3u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_2.get_const::<3usize, 3u8>() as u32) + } } #[inline] pub fn set_mStrokeOpacitySource(&mut self, val: nsStyleSVGOpacitySource) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_2.set(3usize, 3u8, val as u64) + self._bitfield_2.set_const::<3usize, 3u8>(val as u64) } } #[inline] @@ -378,8 +727,10 @@ impl Weird { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_2), 3usize, 3u8) - as u32, + >>::raw_get_const::< + 3usize, + 3u8, + >(::std::ptr::addr_of!((*this)._bitfield_2)) as u32, ) } } @@ -392,23 +743,23 @@ impl Weird { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - 
::std::ptr::addr_of_mut!((*this)._bitfield_2), + >>::raw_set_const::< 3usize, 3u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_2), val as u64) } } #[inline] pub fn mStrokeDasharrayFromObject(&self) -> bool { - unsafe { ::std::mem::transmute(self._bitfield_2.get(6usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_2.get_const::<6usize, 1u8>() as u8) + } } #[inline] pub fn set_mStrokeDasharrayFromObject(&mut self, val: bool) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_2.set(6usize, 1u8, val as u64) + self._bitfield_2.set_const::<6usize, 1u8>(val as u64) } } #[inline] @@ -417,7 +768,10 @@ impl Weird { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_2), 6usize, 1u8) as u8, + >>::raw_get_const::< + 6usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_2)) as u8, ) } } @@ -427,23 +781,23 @@ impl Weird { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_2), + >>::raw_set_const::< 6usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_2), val as u64) } } #[inline] pub fn mStrokeDashoffsetFromObject(&self) -> bool { - unsafe { ::std::mem::transmute(self._bitfield_2.get(7usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_2.get_const::<7usize, 1u8>() as u8) + } } #[inline] pub fn set_mStrokeDashoffsetFromObject(&mut self, val: bool) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_2.set(7usize, 1u8, val as u64) + self._bitfield_2.set_const::<7usize, 1u8>(val as u64) } } #[inline] @@ -452,7 +806,10 @@ impl Weird { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_2), 7usize, 1u8) as u8, + >>::raw_get_const::< + 7usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_2)) as u8, ) } } @@ -462,23 +819,23 @@ impl Weird { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_2), + >>::raw_set_const::< 7usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_2), val as u64) } } #[inline] pub fn mStrokeWidthFromObject(&self) -> bool { - unsafe { ::std::mem::transmute(self._bitfield_2.get(8usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_2.get_const::<8usize, 1u8>() as u8) + } } #[inline] pub fn set_mStrokeWidthFromObject(&mut self, val: bool) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_2.set(8usize, 1u8, val as u64) + self._bitfield_2.set_const::<8usize, 1u8>(val as u64) } } #[inline] @@ -487,7 +844,10 @@ impl Weird { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_2), 8usize, 1u8) as u8, + >>::raw_get_const::< + 8usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_2)) as u8, ) } } @@ -497,12 +857,10 @@ impl Weird { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_2), + >>::raw_set_const::< 8usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_2), val as u64) } } #[inline] @@ -515,60 +873,55 @@ impl Weird { ) -> __BindgenBitfieldUnit<[u8; 2usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 2usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 3u8, - { - let 
mFillOpacitySource: u32 = unsafe { - ::std::mem::transmute(mFillOpacitySource) - }; - mFillOpacitySource as u64 - }, - ); + >({ + let mFillOpacitySource: u32 = unsafe { + ::std::mem::transmute(mFillOpacitySource) + }; + mFillOpacitySource as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 3usize, 3u8, - { - let mStrokeOpacitySource: u32 = unsafe { - ::std::mem::transmute(mStrokeOpacitySource) - }; - mStrokeOpacitySource as u64 - }, - ); + >({ + let mStrokeOpacitySource: u32 = unsafe { + ::std::mem::transmute(mStrokeOpacitySource) + }; + mStrokeOpacitySource as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 6usize, 1u8, - { - let mStrokeDasharrayFromObject: u8 = unsafe { - ::std::mem::transmute(mStrokeDasharrayFromObject) - }; - mStrokeDasharrayFromObject as u64 - }, - ); + >({ + let mStrokeDasharrayFromObject: u8 = unsafe { + ::std::mem::transmute(mStrokeDasharrayFromObject) + }; + mStrokeDasharrayFromObject as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 7usize, 1u8, - { - let mStrokeDashoffsetFromObject: u8 = unsafe { - ::std::mem::transmute(mStrokeDashoffsetFromObject) - }; - mStrokeDashoffsetFromObject as u64 - }, - ); + >({ + let mStrokeDashoffsetFromObject: u8 = unsafe { + ::std::mem::transmute(mStrokeDashoffsetFromObject) + }; + mStrokeDashoffsetFromObject as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 8usize, 1u8, - { - let mStrokeWidthFromObject: u8 = unsafe { - ::std::mem::transmute(mStrokeWidthFromObject) - }; - mStrokeWidthFromObject as u64 - }, - ); + >({ + let mStrokeWidthFromObject: u8 = unsafe { + ::std::mem::transmute(mStrokeWidthFromObject) + }; + mStrokeWidthFromObject as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen/codegen/bitfield_unit.rs b/bindgen/codegen/bitfield_unit.rs index 8be311e311..3ca5a8d6db 100644 --- a/bindgen/codegen/bitfield_unit.rs +++ b/bindgen/codegen/bitfield_unit.rs @@ -99,19 +99,39 @@ where self.storage.as_ref().len() ); - let mut val = 0; + if bit_width == 0 { + return 0; + } - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= + (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + + // Mask to bit_width + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + + if cfg!(target_endian = "big") { + // Reverse bits within the field width + val = val.reverse_bits() >> (64 - bit_width as usize); + } + val } @@ -128,19 +148,40 @@ where core::mem::size_of::() ); - let mut val = 0; + if bit_width == 0 { + return 0; + } - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + + let storage_ptr = + unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { 
*storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); } } + val >>= bit_shift; + + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + val } @@ -153,15 +194,50 @@ where self.storage.as_ref().len() ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + + let mut val = val; + + // Mask to bit_width + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + + if cfg!(target_endian = "big") { + // Reverse bits to match storage layout + val = val.reverse_bits() >> (64 - bit_width as usize); + } + + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + + // Shift val to align with byte boundary + val <<= bit_shift; + + // Create mask for the bits we're writing + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & + !byte_mask) | + (byte_val & byte_mask); + } } } @@ -179,17 +255,405 @@ where core::mem::size_of::<Storage>() ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + + let mut val = val; + + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + + val <<= bit_shift; + + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + + let storage_ptr = + unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i + unsafe { + *byte_ptr = + (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + } + } +} + +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl<const N: usize> __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
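Review note: a sketch of what the const parameters buy on the fast path; for get_const::<0usize, 16u8> on little-endian, bytes_needed folds to 2 and the loop can reduce to roughly the following (hand-unrolled illustration, not literal compiler output):

    let lo = self.storage[0] as usize;
    let hi = self.storage[1] as usize;
    (((hi << 8) | lo) & 0xFFFF) as u64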
+ #[inline] + pub const fn get_const< + const BIT_OFFSET: usize, + const BIT_WIDTH: u8, + >( + &self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + + if BIT_WIDTH == 0 { + return 0; + } + + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + + // Use usize for fields that fit, u64 only when necessary. + // The compiler eliminates the unused branch since BIT_WIDTH is const. + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i].reverse_bits() + as usize) << + (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> + (usize::BITS as usize - BIT_WIDTH as usize); + } + + val as u64 + } else { + let mut val = 0u64; + + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i].reverse_bits() as u64) << + (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + + val >>= bit_shift; + + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + + val + } + } + + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const< + const BIT_OFFSET: usize, + const BIT_WIDTH: u8, + >( + &mut self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + + if BIT_WIDTH == 0 { + return; + } + + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + + // Use usize for fields that fit, u64 only when necessary. + // The compiler eliminates the unused branch since BIT_WIDTH is const. 
+
+    /// Set a field using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub fn set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        &mut self,
+        val: u64,
+    ) {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+
+        if BIT_WIDTH == 0 {
+            return;
+        }
+
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+
+        // Use usize for fields that fit, u64 only when necessary.
+        // The compiler eliminates the unused branch since BIT_WIDTH is const.
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = val as usize;
+            if (BIT_WIDTH as usize) < usize::BITS as usize {
+                val &= (1usize << BIT_WIDTH) - 1;
+            }
+
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >>
+                    (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+
+            val <<= bit_shift;
+
+            let field_mask = if (BIT_WIDTH as usize) < usize::BITS as usize {
+                ((1usize << BIT_WIDTH) - 1) << bit_shift
+            } else {
+                !0usize << bit_shift
+            };
+
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+
+                if cfg!(target_endian = "big") {
+                    let byte = self.storage[start_byte + i].reverse_bits();
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    self.storage[start_byte + i] = new_byte.reverse_bits();
+                } else {
+                    self.storage[start_byte + i] =
+                        (self.storage[start_byte + i] & !byte_mask) |
+                        (byte_val & byte_mask);
+                }
+                i += 1;
+            }
+        } else {
+            let mut val = val;
+
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+
+            val <<= bit_shift;
+
+            let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 {
+                !0u64 << bit_shift
+            } else {
+                ((1u64 << BIT_WIDTH) - 1) << bit_shift
             };
-        unsafe {
-            Self::raw_set_bit(this, index + bit_offset, val_bit_is_set)
+
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+
+                if cfg!(target_endian = "big") {
+                    let byte = self.storage[start_byte + i].reverse_bits();
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    self.storage[start_byte + i] = new_byte.reverse_bits();
+                } else {
+                    self.storage[start_byte + i] =
+                        (self.storage[start_byte + i] & !byte_mask) |
+                        (byte_val & byte_mask);
+                }
+                i += 1;
+            }
+        }
+    }
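`get_const` above and `raw_get_const` below are `const fn`, which is why they iterate with `while` and a manual counter: `for` desugars to `IntoIterator`/`Iterator::next` trait calls, which stable Rust does not allow in constant evaluation. A standalone illustration (function and constant names invented):

```rust
// Compiles on stable: `while` with manual indexing is allowed in const fn.
const fn sum_bytes(bytes: &[u8; 4]) -> u32 {
    let mut total = 0u32;
    let mut i = 0;
    // `for b in bytes` would be rejected here: it expands to trait method
    // calls that cannot run at compile time.
    while i < bytes.len() {
        total += bytes[i] as u32;
        i += 1;
    }
    total
}

const TOTAL: u32 = sum_bytes(&[1, 2, 3, 4]); // evaluated at compile time

fn main() {
    assert_eq!(TOTAL, 10);
}
```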
+
+    /// Raw pointer get using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub const unsafe fn raw_get_const<
+        const BIT_OFFSET: usize,
+        const BIT_WIDTH: u8,
+    >(
+        this: *const Self,
+    ) -> u64 {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+
+        if BIT_WIDTH == 0 {
+            return 0;
+        }
+
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+
+        let storage_ptr =
+            unsafe { core::ptr::addr_of!((*this).storage) as *const u8 };
+
+        // Use usize for fields that fit, u64 only when necessary.
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = 0usize;
+
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte.reverse_bits() as usize) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte as usize) << (i * 8);
+                    i += 1;
+                }
+            }
+
+            val >>= bit_shift;
+            if (BIT_WIDTH as usize) < usize::BITS as usize {
+                val &= (1usize << BIT_WIDTH) - 1;
+            }
+
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >>
+                    (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+
+            val as u64
+        } else {
+            let mut val = 0u64;
+
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte.reverse_bits() as u64) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte as u64) << (i * 8);
+                    i += 1;
+                }
+            }
+
+            val >>= bit_shift;
+
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+
+            val
+        }
+    }
+
+    /// Raw pointer set using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub unsafe fn raw_set_const<
+        const BIT_OFFSET: usize,
+        const BIT_WIDTH: u8,
+    >(
+        this: *mut Self,
+        val: u64,
+    ) {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+
+        if BIT_WIDTH == 0 {
+            return;
+        }
+
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+
+        // Cast through pointer types instead of using addr_of_mut! for
+        // const compatibility
+        let storage_ptr = this.cast::<[u8; N]>().cast::<u8>();
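The double cast above replaces the `addr_of_mut!` projection used by the runtime `raw_set`. Because the unit is a `repr(C)` struct whose storage array is its first field, the struct pointer and the storage pointer share an address, and `pointer::cast` derives one from the other without any dereference. A reduced sketch with a hypothetical stand-in type:

```rust
// Hypothetical type standing in for the bitfield unit: for a repr(C)
// struct, the first field lives at offset 0, so casting the struct pointer
// yields a valid pointer to the storage bytes.
#[repr(C)]
struct Unit<const N: usize> {
    storage: [u8; N],
}

fn storage_ptr<const N: usize>(this: *mut Unit<N>) -> *mut u8 {
    // No `unsafe` needed to derive the pointer itself; `unsafe` is only
    // required to read or write through it later.
    this.cast::<[u8; N]>().cast::<u8>()
}

fn main() {
    let mut unit = Unit::<2> { storage: [0xab, 0xcd] };
    let p = storage_ptr(&mut unit);
    assert_eq!(unsafe { *p }, 0xab);
}
```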
+
+        // Use usize for fields that fit, u64 only when necessary.
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = val as usize;
+            if (BIT_WIDTH as usize) < usize::BITS as usize {
+                val &= (1usize << BIT_WIDTH) - 1;
+            }
+
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >>
+                    (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+
+            val <<= bit_shift;
+
+            let field_mask = if (BIT_WIDTH as usize) < usize::BITS as usize {
+                ((1usize << BIT_WIDTH) - 1) << bit_shift
+            } else {
+                !0usize << bit_shift
+            };
+
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                let byte_ptr = unsafe { storage_ptr.add(start_byte + i) };
+
+                if cfg!(target_endian = "big") {
+                    let byte = unsafe { (*byte_ptr).reverse_bits() };
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    unsafe { *byte_ptr = new_byte.reverse_bits() };
+                } else {
+                    unsafe {
+                        *byte_ptr =
+                            (*byte_ptr & !byte_mask) | (byte_val & byte_mask)
+                    };
+                }
+                i += 1;
+            }
+        } else {
+            let mut val = val;
+
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+
+            val <<= bit_shift;
+
+            let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 {
+                !0u64 << bit_shift
+            } else {
+                ((1u64 << BIT_WIDTH) - 1) << bit_shift
+            };
+
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                let byte_ptr = unsafe { storage_ptr.add(start_byte + i) };
+
+                if cfg!(target_endian = "big") {
+                    let byte = unsafe { (*byte_ptr).reverse_bits() };
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    unsafe { *byte_ptr = new_byte.reverse_bits() };
+                } else {
+                    unsafe {
+                        *byte_ptr =
+                            (*byte_ptr & !byte_mask) | (byte_val & byte_mask)
+                    };
+                }
+                i += 1;
+            }
+        }
+    }
+}
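One property the new tests lean on implicitly: a set followed by a get returns the value truncated to the field width, because both paths mask with `(1 << width) - 1`. Stated as a sketch (test name and values invented):

```rust
#[test]
fn set_then_get_truncates_to_width() {
    let mut unit = __BindgenBitfieldUnit::<[u8; 2]>::new([0; 2]);
    unit.set_const::<3, 5>(0b1110_1010); // deliberately wider than 5 bits
    assert_eq!(unit.get_const::<3, 5>(), 0b0_1010); // low 5 bits survive
}
```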
diff --git a/bindgen/codegen/bitfield_unit_tests.rs b/bindgen/codegen/bitfield_unit_tests.rs
index ead0ffec0c..8c9ffdb0fa 100644
--- a/bindgen/codegen/bitfield_unit_tests.rs
+++ b/bindgen/codegen/bitfield_unit_tests.rs
@@ -258,3 +258,112 @@ bitfield_unit_set! {
     set(7, 16, 0b1111111111111111) is 0b00000000011111111111111110000000;
     set(8, 16, 0b1111111111111111) is 0b00000000111111111111111100000000;
 }
+
+// Tests for const-generic methods
+#[test]
+fn bitfield_unit_get_const_matches_get() {
+    // Test that get_const produces same results as get
+    let unit = __BindgenBitfieldUnit::<[u8; 4]>::new([
+        0b01010101, 0b11111111, 0b00000000, 0b11111111,
+    ]);
+
+    // Single byte tests
+    assert_eq!(unit.get_const::<0, 1>(), unit.get(0, 1));
+    assert_eq!(unit.get_const::<1, 1>(), unit.get(1, 1));
+    assert_eq!(unit.get_const::<0, 8>(), unit.get(0, 8));
+    assert_eq!(unit.get_const::<3, 5>(), unit.get(3, 5));
+
+    // Cross-byte boundary tests
+    assert_eq!(unit.get_const::<0, 16>(), unit.get(0, 16));
+    assert_eq!(unit.get_const::<4, 16>(), unit.get(4, 16));
+    assert_eq!(unit.get_const::<7, 16>(), unit.get(7, 16));
+    assert_eq!(unit.get_const::<8, 16>(), unit.get(8, 16));
+
+    // Large field
+    assert_eq!(unit.get_const::<0, 32>(), unit.get(0, 32));
+}
+
+#[test]
+fn bitfield_unit_set_const_matches_set() {
+    // Test that set_const produces same results as set
+    let test_value = 0b101010101010;
+
+    for offset in [0, 1, 3, 7, 8, 12] {
+        for width in [1, 2, 5, 8, 12] {
+            let mut unit_const = __BindgenBitfieldUnit::<[u8; 4]>::new([0; 4]);
+            let mut unit_runtime =
+                __BindgenBitfieldUnit::<[u8; 4]>::new([0; 4]);
+
+            match (offset, width) {
+                (0, 1) => unit_const.set_const::<0, 1>(test_value),
+                (0, 2) => unit_const.set_const::<0, 2>(test_value),
+                (0, 5) => unit_const.set_const::<0, 5>(test_value),
+                (0, 8) => unit_const.set_const::<0, 8>(test_value),
+                (0, 12) => unit_const.set_const::<0, 12>(test_value),
+                (1, 1) => unit_const.set_const::<1, 1>(test_value),
+                (1, 2) => unit_const.set_const::<1, 2>(test_value),
+                (1, 5) => unit_const.set_const::<1, 5>(test_value),
+                (1, 8) => unit_const.set_const::<1, 8>(test_value),
+                (1, 12) => unit_const.set_const::<1, 12>(test_value),
+                (3, 1) => unit_const.set_const::<3, 1>(test_value),
+                (3, 2) => unit_const.set_const::<3, 2>(test_value),
+                (3, 5) => unit_const.set_const::<3, 5>(test_value),
+                (3, 8) => unit_const.set_const::<3, 8>(test_value),
+                (3, 12) => unit_const.set_const::<3, 12>(test_value),
+                (7, 1) => unit_const.set_const::<7, 1>(test_value),
+                (7, 2) => unit_const.set_const::<7, 2>(test_value),
+                (7, 5) => unit_const.set_const::<7, 5>(test_value),
+                (7, 8) => unit_const.set_const::<7, 8>(test_value),
+                (7, 12) => unit_const.set_const::<7, 12>(test_value),
+                (8, 1) => unit_const.set_const::<8, 1>(test_value),
+                (8, 2) => unit_const.set_const::<8, 2>(test_value),
+                (8, 5) => unit_const.set_const::<8, 5>(test_value),
+                (8, 8) => unit_const.set_const::<8, 8>(test_value),
+                (8, 12) => unit_const.set_const::<8, 12>(test_value),
+                (12, 1) => unit_const.set_const::<12, 1>(test_value),
+                (12, 2) => unit_const.set_const::<12, 2>(test_value),
+                (12, 5) => unit_const.set_const::<12, 5>(test_value),
+                (12, 8) => unit_const.set_const::<12, 8>(test_value),
+                (12, 12) => unit_const.set_const::<12, 12>(test_value),
+                _ => continue,
+            }
+
+            unit_runtime.set(offset, width, test_value);
+            // Compare by reading back the full value
+            assert_eq!(unit_const.get(0, 32), unit_runtime.get(0, 32));
+        }
+    }
+}
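The exhaustive `match` above is hard to avoid in direct form: const-generic arguments must be compile-time constants, so the runtime loop variables `offset` and `width` cannot be forwarded to `set_const` as-is. If the cross-product grows, a declarative macro could generate the arms; a hypothetical sketch, not part of this change:

```rust
// Hypothetical helper: expands one match arm per (offset, width) pair.
macro_rules! set_const_dispatch {
    ($unit:ident, $off:expr, $w:expr, $val:expr;
     $(($O:literal, $W:literal)),+ $(,)?) => {
        match ($off, $w) {
            $(($O, $W) => $unit.set_const::<$O, $W>($val),)+
            _ => {}
        }
    };
}

// Usage sketch:
// set_const_dispatch!(unit_const, offset, width, test_value;
//     (0, 1), (0, 2), (0, 5), (0, 8), (0, 12), (1, 1) /* ... */);
```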
+
+#[test]
+fn bitfield_unit_raw_const_methods() {
+    let unit = __BindgenBitfieldUnit::<[u8; 2]>::new([0b10011101, 0b00011101]);
+
+    // Test raw_get_const
+    unsafe {
+        assert_eq!(
+            __BindgenBitfieldUnit::raw_get_const::<0, 8>(&unit),
+            unit.get(0, 8)
+        );
+        assert_eq!(
+            __BindgenBitfieldUnit::raw_get_const::<4, 8>(&unit),
+            unit.get(4, 8)
+        );
+        assert_eq!(
+            __BindgenBitfieldUnit::raw_get_const::<0, 16>(&unit),
+            unit.get(0, 16)
+        );
+    }
+
+    // Test raw_set_const
+    let mut unit_const = __BindgenBitfieldUnit::<[u8; 2]>::new([0; 2]);
+    let mut unit_runtime = __BindgenBitfieldUnit::<[u8; 2]>::new([0; 2]);
+
+    unsafe {
+        __BindgenBitfieldUnit::raw_set_const::<3, 5>(&mut unit_const, 0b11111);
+    }
+    unit_runtime.set(3, 5, 0b11111);
+
+    // Compare by reading back
+    assert_eq!(unit_const.get(0, 16), unit_runtime.get(0, 16));
+}
diff --git a/bindgen/codegen/mod.rs b/bindgen/codegen/mod.rs
index a5aa73b5d8..75801ad117 100644
--- a/bindgen/codegen/mod.rs
+++ b/bindgen/codegen/mod.rs
@@ -1786,9 +1786,7 @@ impl Bitfield {
     let prefix = ctx.trait_prefix();
     ctor_impl.append_all(quote! {
-        __bindgen_bitfield_unit.set(
-            #offset,
-            #width,
+        __bindgen_bitfield_unit.set_const::<#offset, #width>(
             {
                 let #param_name: #bitfield_int_ty = unsafe {
                     ::#prefix::mem::transmute(#param_name)
@@ -2149,7 +2147,7 @@ impl<'a> FieldCodegen<'a> for Bitfield {
         #access_spec fn #getter_name(&self) -> #bitfield_ty {
             unsafe {
                 ::#prefix::mem::transmute(
-                    self.#unit_field_ident.get(#offset, #width)
+                    self.#unit_field_ident.get_const::<#offset, #width>()
                         as #bitfield_int_ty
                 )
             }
@@ -2159,9 +2157,7 @@ impl<'a> FieldCodegen<'a> for Bitfield {
         #access_spec fn #setter_name(&mut self, val: #bitfield_ty) {
             unsafe {
                 let val: #bitfield_int_ty = ::#prefix::mem::transmute(val);
-                self.#unit_field_ident.set(
-                    #offset,
-                    #width,
+                self.#unit_field_ident.set_const::<#offset, #width>(
                     val as u64
                 )
             }
@@ -2172,10 +2168,8 @@ impl<'a> FieldCodegen<'a> for Bitfield {
         #[inline]
         #access_spec unsafe fn #raw_getter_name(this: *const Self) -> #bitfield_ty {
             unsafe {
-                ::#prefix::mem::transmute(<#unit_field_ty>::raw_get(
+                ::#prefix::mem::transmute(<#unit_field_ty>::raw_get_const::<#offset, #width>(
                     ::#prefix::ptr::addr_of!((*this).#unit_field_ident),
-                    #offset,
-                    #width,
                 ) as #bitfield_int_ty)
             }
         }
@@ -2184,10 +2178,8 @@ impl<'a> FieldCodegen<'a> for Bitfield {
         #access_spec unsafe fn #raw_setter_name(this: *mut Self, val: #bitfield_ty) {
             unsafe {
                 let val: #bitfield_int_ty = ::#prefix::mem::transmute(val);
-                <#unit_field_ty>::raw_set(
+                <#unit_field_ty>::raw_set_const::<#offset, #width>(
                     ::#prefix::ptr::addr_of_mut!((*this).#unit_field_ident),
-                    #offset,
-                    #width,
                     val as u64,
                 )
             }
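For a concrete picture of what the updated codegen emits, here is a simplified, hypothetical expansion for a C bitfield `unsigned kind : 3` at bit offset 2 (struct and field names invented; real output carries additional derives and layout assertions):

```rust
#[repr(C)]
pub struct Flags {
    pub _bitfield_1: __BindgenBitfieldUnit<[u8; 1]>,
}

impl Flags {
    #[inline]
    pub fn kind(&self) -> u32 {
        // Offset and width are baked in as const-generic arguments, so the
        // loads and masks fold to constants.
        unsafe {
            ::core::mem::transmute(self._bitfield_1.get_const::<2, 3>() as u32)
        }
    }
    #[inline]
    pub fn set_kind(&mut self, val: u32) {
        unsafe {
            let val: u32 = ::core::mem::transmute(val);
            self._bitfield_1.set_const::<2, 3>(val as u64)
        }
    }
}
```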