From c993830643589a05535081b000ca238516210f9e Mon Sep 17 00:00:00 2001
From: Jeremy Fitzhardinge
Date: Sat, 6 Dec 2025 02:43:57 -0800
Subject: [PATCH 1/2] Optimize BitfieldUnit get/set with byte-wise operations

Replace the bit-at-a-time loops in get(), set(), raw_get(), and
raw_set() with byte-wise shift/mask operations. This significantly
improves performance for multi-bit field access.

The new implementation reads and writes the storage in byte-sized
chunks, using shifts and masks to extract or insert the whole field
rather than iterating over each bit individually. On big-endian
targets, the algorithm reverses the bits within each byte and then
reverses the assembled value, preserving the semantics of the original
implementation. A standalone sketch of the extraction path follows the
first diff header below.
---
 .../tests/bitfield-32bit-overflow.rs          | 140 ++++++++++----
 .../expectations/tests/bitfield-large.rs      | 140 ++++++++++----
 .../expectations/tests/bitfield-linux-32.rs   | 140 ++++++++++----
 .../tests/bitfield-method-same-name.rs        | 140 ++++++++++----
 .../expectations/tests/bitfield-template.rs   | 140 ++++++++++----
 .../expectations/tests/bitfield_align.rs      | 140 ++++++++++----
 .../expectations/tests/bitfield_align_2.rs    | 140 ++++++++++----
 .../tests/bitfield_method_mangling.rs         | 140 ++++++++++----
 .../tests/bitfield_pack_offset.rs             | 140 ++++++++++----
 .../tests/bitfield_pragma_packed.rs           | 140 ++++++++++----
 .../tests/default_visibility_crate.rs         | 140 ++++++++++----
 .../tests/default_visibility_private.rs       | 140 ++++++++++----
 ...bility_private_respects_cxx_access_spec.rs | 140 ++++++++++----
 .../tests/derive-bitfield-method-same-name.rs | 140 ++++++++++----
 .../tests/derive-debug-bitfield-1-51.rs       | 140 ++++++++++----
 .../tests/derive-debug-bitfield-core.rs       | 140 ++++++++++----
 .../tests/derive-debug-bitfield.rs            | 140 ++++++++++----
 .../tests/derive-partialeq-bitfield.rs        | 140 ++++++++++----
 .../tests/divide-by-zero-in-struct-layout.rs  | 140 ++++++++++----
 .../tests/field-visibility-callback.rs        | 140 ++++++++++----
 .../expectations/tests/field-visibility.rs    | 140 ++++++++++----
 .../tests/incomplete-array-padding.rs         | 140 ++++++++++----
 .../tests/expectations/tests/issue-1034.rs    | 140 ++++++++++----
 .../issue-1076-unnamed-bitfield-alignment.rs  | 140 ++++++++++----
 .../tests/expectations/tests/issue-1947.rs    | 140 ++++++++++----
 .../tests/issue-739-pointer-wide-bitfield.rs  | 140 ++++++++++----
 .../tests/expectations/tests/issue-743.rs     | 140 ++++++++++----
 .../tests/expectations/tests/issue-816.rs     | 140 ++++++++++----
 .../expectations/tests/jsval_layout_opaque.rs | 140 ++++++++++----
 .../tests/expectations/tests/layout_align.rs  | 140 ++++++++++----
 .../expectations/tests/layout_eth_conf.rs     | 140 ++++++++++----
 .../tests/expectations/tests/layout_mbuf.rs   | 140 ++++++++++----
 .../expectations/tests/only_bitfields.rs      | 140 ++++++++++----
 .../expectations/tests/packed-bitfield.rs     | 140 ++++++++++----
 .../expectations/tests/private_fields.rs      | 140 ++++++++++----
 .../tests/redundant-packed-and-align.rs       | 140 ++++++++++----
 .../tests/struct_with_bitfields.rs            | 140 ++++++++++----
 .../tests/expectations/tests/timex.rs         | 140 ++++++++++----
 .../expectations/tests/union_bitfield.rs      | 140 ++++++++++----
 .../tests/union_with_anon_struct_bitfield.rs  | 140 ++++++++++----
 .../expectations/tests/weird_bitfields.rs     | 140 ++++++++++----
 bindgen/codegen/bitfield_unit.rs              | 180 ++++++++++++++----
 42 files changed, 4489 insertions(+), 1431 deletions(-)

diff --git a/bindgen-tests/tests/expectations/tests/bitfield-32bit-overflow.rs b/bindgen-tests/tests/expectations/tests/bitfield-32bit-overflow.rs
index 783f0ef7a9..97e4816a3d 100644
---
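// Standalone sketch (not part of the generated diff): the little-endian
// extraction path of the new get(), written against a plain byte slice.
// The name get_le and the free-function shape are illustrative, not
// bindgen's generated API. The big-endian path additionally applies
// u8::reverse_bits() to each byte and then to the assembled value, as
// described in the commit message. Assumes the field fits in a u64 once
// shifted, i.e. bit_shift + bit_width <= 64.
fn get_le(storage: &[u8], bit_offset: usize, bit_width: u8) -> u64 {
    if bit_width == 0 {
        return 0;
    }
    let start_byte = bit_offset / 8; // first byte touched by the field
    let bit_shift = bit_offset % 8; // field's offset within that byte
    debug_assert!(bit_shift + bit_width as usize <= 64);
    let bytes_needed = (bit_width as usize + bit_shift + 7) / 8;
    let mut val = 0u64;
    for i in 0..bytes_needed {
        // Gather whole bytes, least-significant byte first.
        val |= (storage[start_byte + i] as u64) << (i * 8);
    }
    val >>= bit_shift; // drop the bits below the field
    if bit_width < 64 {
        val &= (1u64 << bit_width) - 1; // drop the bits above the field
    }
    val
}

fn main() {
    // A 6-bit field at bit offset 3, spanning a byte boundary: storage
    // bits 3..=8, read LSB-first, are 1,0,1,1,1,1, i.e. 0b111101 (61).
    assert_eq!(get_le(&[0b1110_1000, 0b0000_0001], 3, 6), 0b111101);
}
// set() mirrors this shape: mask the incoming value to bit_width, shift
// it into position, then do a masked read-modify-write on each byte.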
a/bindgen-tests/tests/expectations/tests/bitfield-32bit-overflow.rs +++ b/bindgen-tests/tests/expectations/tests/bitfield-32bit-overflow.rs @@ -75,16 +75,29 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); 
+ } } } #[inline] @@ -133,15 +183,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i - }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } } } } diff --git a/bindgen-tests/tests/expectations/tests/bitfield-large.rs b/bindgen-tests/tests/expectations/tests/bitfield-large.rs index 5d614ab936..45d8198c12 100644 --- a/bindgen-tests/tests/expectations/tests/bitfield-large.rs +++ b/bindgen-tests/tests/expectations/tests/bitfield-large.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 
0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i - }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } } } } diff --git a/bindgen-tests/tests/expectations/tests/bitfield-linux-32.rs b/bindgen-tests/tests/expectations/tests/bitfield-linux-32.rs index 3e676c53b5..9fd3b3d7a5 100644 --- a/bindgen-tests/tests/expectations/tests/bitfield-linux-32.rs +++ b/bindgen-tests/tests/expectations/tests/bitfield-linux-32.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) 
{ - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize 
- 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i - }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } } } } diff --git a/bindgen-tests/tests/expectations/tests/bitfield-method-same-name.rs b/bindgen-tests/tests/expectations/tests/bitfield-method-same-name.rs index 09ca005589..737b10fbc9 100644 --- a/bindgen-tests/tests/expectations/tests/bitfield-method-same-name.rs +++ b/bindgen-tests/tests/expectations/tests/bitfield-method-same-name.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> 
(64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i - }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } } } } diff --git a/bindgen-tests/tests/expectations/tests/bitfield-template.rs b/bindgen-tests/tests/expectations/tests/bitfield-template.rs index eb454e0db4..9639db7cbb 100644 --- a/bindgen-tests/tests/expectations/tests/bitfield-template.rs +++ b/bindgen-tests/tests/expectations/tests/bitfield-template.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let 
start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + 
let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i - }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } } } } diff --git a/bindgen-tests/tests/expectations/tests/bitfield_align.rs b/bindgen-tests/tests/expectations/tests/bitfield_align.rs index 0c70917fc5..58302c5d0f 100644 --- a/bindgen-tests/tests/expectations/tests/bitfield_align.rs +++ b/bindgen-tests/tests/expectations/tests/bitfield_align.rs @@ -75,16 +75,29 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width 
as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i - }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } } } } diff --git a/bindgen-tests/tests/expectations/tests/bitfield_align_2.rs b/bindgen-tests/tests/expectations/tests/bitfield_align_2.rs index b71bba18ad..e568c8ded3 100644 --- a/bindgen-tests/tests/expectations/tests/bitfield_align_2.rs +++ b/bindgen-tests/tests/expectations/tests/bitfield_align_2.rs @@ -76,17 +76,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= 
(storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -96,16 +109,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -116,15 +144,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -134,15 +184,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { 
core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i - }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } } } } diff --git a/bindgen-tests/tests/expectations/tests/bitfield_method_mangling.rs b/bindgen-tests/tests/expectations/tests/bitfield_method_mangling.rs index 35117c74b6..2bb54f326a 100644 --- a/bindgen-tests/tests/expectations/tests/bitfield_method_mangling.rs +++ b/bindgen-tests/tests/expectations/tests/bitfield_method_mangling.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let 
storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i - }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } } } } diff --git a/bindgen-tests/tests/expectations/tests/bitfield_pack_offset.rs b/bindgen-tests/tests/expectations/tests/bitfield_pack_offset.rs index d654e25b27..dad536203f 100644 --- a/bindgen-tests/tests/expectations/tests/bitfield_pack_offset.rs +++ b/bindgen-tests/tests/expectations/tests/bitfield_pack_offset.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val 
} #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = 
"big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i - }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } } } } diff --git a/bindgen-tests/tests/expectations/tests/bitfield_pragma_packed.rs b/bindgen-tests/tests/expectations/tests/bitfield_pragma_packed.rs index 6f9adcb5ab..54186cb33e 100644 --- a/bindgen-tests/tests/expectations/tests/bitfield_pragma_packed.rs +++ b/bindgen-tests/tests/expectations/tests/bitfield_pragma_packed.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift 
+ } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i - }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } } } } diff --git a/bindgen-tests/tests/expectations/tests/default_visibility_crate.rs b/bindgen-tests/tests/expectations/tests/default_visibility_crate.rs index aeefb2e0f9..68f26e5f3b 100644 --- a/bindgen-tests/tests/expectations/tests/default_visibility_crate.rs +++ b/bindgen-tests/tests/expectations/tests/default_visibility_crate.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = 
[The identical BitfieldUnit hunks repeat verbatim in each of the following expectation files:]

diff --git a/bindgen-tests/tests/expectations/tests/default_visibility_crate.rs b/bindgen-tests/tests/expectations/tests/default_visibility_crate.rs
index aeefb2e0f9..68f26e5f3b 100644
diff --git a/bindgen-tests/tests/expectations/tests/default_visibility_private.rs b/bindgen-tests/tests/expectations/tests/default_visibility_private.rs
index dceed75e36..7397db3baf 100644
diff --git a/bindgen-tests/tests/expectations/tests/default_visibility_private_respects_cxx_access_spec.rs b/bindgen-tests/tests/expectations/tests/default_visibility_private_respects_cxx_access_spec.rs
index f43be84bb0..4e4d8f1d6c 100644
diff --git a/bindgen-tests/tests/expectations/tests/derive-bitfield-method-same-name.rs b/bindgen-tests/tests/expectations/tests/derive-bitfield-method-same-name.rs
index 7fa8bc41ab..2d41f391c2 100644
diff --git a/bindgen-tests/tests/expectations/tests/derive-debug-bitfield-1-51.rs b/bindgen-tests/tests/expectations/tests/derive-debug-bitfield-1-51.rs
index 87cbb7346c..f88689d284 100644
diff --git a/bindgen-tests/tests/expectations/tests/derive-debug-bitfield-core.rs b/bindgen-tests/tests/expectations/tests/derive-debug-bitfield-core.rs
index 937ad4ad0c..ba6c38706a 100644
diff --git a/bindgen-tests/tests/expectations/tests/derive-debug-bitfield.rs b/bindgen-tests/tests/expectations/tests/derive-debug-bitfield.rs
index 87cbb7346c..f88689d284 100644
diff --git a/bindgen-tests/tests/expectations/tests/derive-partialeq-bitfield.rs b/bindgen-tests/tests/expectations/tests/derive-partialeq-bitfield.rs
index b8da88e2a7..3375049d91 100644
diff --git a/bindgen-tests/tests/expectations/tests/divide-by-zero-in-struct-layout.rs b/bindgen-tests/tests/expectations/tests/divide-by-zero-in-struct-layout.rs
index 37139d3136..b7fc127f78 100644
diff --git a/bindgen-tests/tests/expectations/tests/field-visibility-callback.rs b/bindgen-tests/tests/expectations/tests/field-visibility-callback.rs
index 99ca3d4b9b..95b084b828 100644
diff --git a/bindgen-tests/tests/expectations/tests/field-visibility.rs b/bindgen-tests/tests/expectations/tests/field-visibility.rs
index 13a1d9a543..420a8397f6 100644
diff --git a/bindgen-tests/tests/expectations/tests/incomplete-array-padding.rs b/bindgen-tests/tests/expectations/tests/incomplete-array-padding.rs
index a90fe54bf3..b3f8b8a06a 100644
diff --git a/bindgen-tests/tests/expectations/tests/issue-1034.rs b/bindgen-tests/tests/expectations/tests/issue-1034.rs
index 90cc768a94..87512f0ed1 100644
diff --git a/bindgen-tests/tests/expectations/tests/issue-1076-unnamed-bitfield-alignment.rs b/bindgen-tests/tests/expectations/tests/issue-1076-unnamed-bitfield-alignment.rs
index 50e9283b5a..59e54c9855 100644
byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i - }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } } } } diff --git a/bindgen-tests/tests/expectations/tests/issue-1947.rs b/bindgen-tests/tests/expectations/tests/issue-1947.rs index 795b033a12..7ffb728c5d 100644 --- a/bindgen-tests/tests/expectations/tests/issue-1947.rs +++ b/bindgen-tests/tests/expectations/tests/issue-1947.rs @@ -75,16 +75,29 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift 
+ 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i - }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } } } } diff --git a/bindgen-tests/tests/expectations/tests/issue-739-pointer-wide-bitfield.rs b/bindgen-tests/tests/expectations/tests/issue-739-pointer-wide-bitfield.rs index bc1951e7d1..e3c5a76ea7 100644 --- a/bindgen-tests/tests/expectations/tests/issue-739-pointer-wide-bitfield.rs +++ b/bindgen-tests/tests/expectations/tests/issue-739-pointer-wide-bitfield.rs @@ -76,17 +76,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -96,16 +109,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= 
core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -116,15 +144,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -134,15 +184,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & 
!byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i - }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } } } } diff --git a/bindgen-tests/tests/expectations/tests/issue-743.rs b/bindgen-tests/tests/expectations/tests/issue-743.rs index af3eb5bf6e..bb8d9153a3 100644 --- a/bindgen-tests/tests/expectations/tests/issue-743.rs +++ b/bindgen-tests/tests/expectations/tests/issue-743.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let 
byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i - }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } } } } diff --git a/bindgen-tests/tests/expectations/tests/issue-816.rs b/bindgen-tests/tests/expectations/tests/issue-816.rs index b1494afede..f2fa4d77d6 100644 --- a/bindgen-tests/tests/expectations/tests/issue-816.rs +++ b/bindgen-tests/tests/expectations/tests/issue-816.rs @@ -75,16 +75,29 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + 
let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i - }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } } } } diff --git a/bindgen-tests/tests/expectations/tests/jsval_layout_opaque.rs 
b/bindgen-tests/tests/expectations/tests/jsval_layout_opaque.rs index dc0ef8ed7f..82e8f2b4b3 100644 --- a/bindgen-tests/tests/expectations/tests/jsval_layout_opaque.rs +++ b/bindgen-tests/tests/expectations/tests/jsval_layout_opaque.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, 
val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i - }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } } } } diff --git a/bindgen-tests/tests/expectations/tests/layout_align.rs b/bindgen-tests/tests/expectations/tests/layout_align.rs index a942adb8f2..c144d98450 100644 --- a/bindgen-tests/tests/expectations/tests/layout_align.rs +++ b/bindgen-tests/tests/expectations/tests/layout_align.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { 
*storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i - }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } } } } diff --git a/bindgen-tests/tests/expectations/tests/layout_eth_conf.rs b/bindgen-tests/tests/expectations/tests/layout_eth_conf.rs index 7d975cd979..55127f0610 100644 --- a/bindgen-tests/tests/expectations/tests/layout_eth_conf.rs +++ b/bindgen-tests/tests/expectations/tests/layout_eth_conf.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width 
as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let 
val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i - }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } } } } diff --git a/bindgen-tests/tests/expectations/tests/layout_mbuf.rs b/bindgen-tests/tests/expectations/tests/layout_mbuf.rs index ce6c58e39e..ee723e601e 100644 --- a/bindgen-tests/tests/expectations/tests/layout_mbuf.rs +++ b/bindgen-tests/tests/expectations/tests/layout_mbuf.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if 
cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i - }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } } } } diff --git a/bindgen-tests/tests/expectations/tests/only_bitfields.rs b/bindgen-tests/tests/expectations/tests/only_bitfields.rs index 9a73fc2fee..bb040c0faf 100644 --- a/bindgen-tests/tests/expectations/tests/only_bitfields.rs +++ b/bindgen-tests/tests/expectations/tests/only_bitfields.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + 
let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); 
+ } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i - }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } } } } diff --git a/bindgen-tests/tests/expectations/tests/packed-bitfield.rs b/bindgen-tests/tests/expectations/tests/packed-bitfield.rs index b5a734454a..39477c1366 100644 --- a/bindgen-tests/tests/expectations/tests/packed-bitfield.rs +++ b/bindgen-tests/tests/expectations/tests/packed-bitfield.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let 
index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i - }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } } } } diff --git a/bindgen-tests/tests/expectations/tests/private_fields.rs b/bindgen-tests/tests/expectations/tests/private_fields.rs index abb2886d39..f4a34b522c 100644 --- a/bindgen-tests/tests/expectations/tests/private_fields.rs +++ b/bindgen-tests/tests/expectations/tests/private_fields.rs @@ -75,16 +75,29 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); } + } else { + 
for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { 
core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i - }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } } } } diff --git a/bindgen-tests/tests/expectations/tests/redundant-packed-and-align.rs b/bindgen-tests/tests/expectations/tests/redundant-packed-and-align.rs index 05401e52ca..e3d8aff547 100644 --- a/bindgen-tests/tests/expectations/tests/redundant-packed-and-align.rs +++ b/bindgen-tests/tests/expectations/tests/redundant-packed-and-align.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + 
} + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i - }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } } } } diff --git a/bindgen-tests/tests/expectations/tests/struct_with_bitfields.rs b/bindgen-tests/tests/expectations/tests/struct_with_bitfields.rs index a294c871d3..254e8357bd 100644 --- a/bindgen-tests/tests/expectations/tests/struct_with_bitfields.rs +++ b/bindgen-tests/tests/expectations/tests/struct_with_bitfields.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as 
usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if 
cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i - }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } } } } diff --git a/bindgen-tests/tests/expectations/tests/timex.rs b/bindgen-tests/tests/expectations/tests/timex.rs index f73b608de2..6a097196e7 100644 --- a/bindgen-tests/tests/expectations/tests/timex.rs +++ b/bindgen-tests/tests/expectations/tests/timex.rs @@ -75,16 +75,29 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; 
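+        // Merge the field into storage byte by byte: keep the bits outside
+        // byte_mask, overwrite the bits inside it with the shifted value.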
+ for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i - }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } } } } diff --git a/bindgen-tests/tests/expectations/tests/union_bitfield.rs b/bindgen-tests/tests/expectations/tests/union_bitfield.rs index 8df0724738..465b87c0ce 100644 --- a/bindgen-tests/tests/expectations/tests/union_bitfield.rs +++ b/bindgen-tests/tests/expectations/tests/union_bitfield.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << 
index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i - }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } } 
} } diff --git a/bindgen-tests/tests/expectations/tests/union_with_anon_struct_bitfield.rs b/bindgen-tests/tests/expectations/tests/union_with_anon_struct_bitfield.rs index a1b61c035d..d0afed9b46 100644 --- a/bindgen-tests/tests/expectations/tests/union_with_anon_struct_bitfield.rs +++ b/bindgen-tests/tests/expectations/tests/union_with_anon_struct_bitfield.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val 
& byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i - }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } } } } diff --git a/bindgen-tests/tests/expectations/tests/weird_bitfields.rs b/bindgen-tests/tests/expectations/tests/weird_bitfields.rs index ca8d84520b..e4c988c9a0 100644 --- a/bindgen-tests/tests/expectations/tests/weird_bitfields.rs +++ b/bindgen-tests/tests/expectations/tests/weird_bitfields.rs @@ -75,17 +75,30 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } val } #[inline] @@ -95,16 +108,31 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - let mut val = 0; - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { 
core::ptr::addr_of!((*this).storage) as *const u8 }; + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + } + } + val >>= bit_shift; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); } val } @@ -115,15 +143,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = (storage[start_byte + i] & !byte_mask) + | (byte_val & byte_mask); + } } } #[inline] @@ -133,15 +183,37 @@ where debug_assert!( (bit_offset + (bit_width as usize)) / 8 <= core::mem::size_of::(), ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + let mut val = val; + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + val <<= bit_shift; + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + let storage_ptr = unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i - }; - unsafe { Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) }; + unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; + } } } } diff --git a/bindgen/codegen/bitfield_unit.rs b/bindgen/codegen/bitfield_unit.rs index 8be311e311..c5ac6637ff 100644 --- a/bindgen/codegen/bitfield_unit.rs +++ b/bindgen/codegen/bitfield_unit.rs @@ -99,19 +99,38 @@ where 
self.storage.as_ref().len() ); - let mut val = 0; - - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + if bit_width == 0 { + return 0; + } + + let mut val = 0u64; + let storage = self.storage.as_ref(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + val |= (storage[start_byte + i] as u64) << (i * 8); } } + val >>= bit_shift; + + // Mask to bit_width + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + + if cfg!(target_endian = "big") { + // Reverse bits within the field width + val = val.reverse_bits() >> (64 - bit_width as usize); + } + val } @@ -128,19 +147,40 @@ where core::mem::size_of::() ); - let mut val = 0; + if bit_width == 0 { + return 0; + } - for i in 0..(bit_width as usize) { - if unsafe { Self::raw_get_bit(this, i + bit_offset) } { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; + let mut val = 0u64; + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + + let storage_ptr = + unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + + if cfg!(target_endian = "big") { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + } + } else { + for i in 0..bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); } } + val >>= bit_shift; + + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + val } @@ -153,15 +193,49 @@ where self.storage.as_ref().len() ); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + + let mut val = val; + + // Mask to bit_width + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + + if cfg!(target_endian = "big") { + // Reverse bits to match storage layout + val = val.reverse_bits() >> (64 - bit_width as usize); + } + + let storage = self.storage.as_mut(); + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + + // Shift val to align with byte boundary + val <<= bit_shift; + + // Create mask for the bits we're writing + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + + if cfg!(target_endian = "big") { + let byte = storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = new_byte.reverse_bits(); } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); + storage[start_byte + i] = + (storage[start_byte + i] & !byte_mask) | (byte_val & byte_mask); + } } } @@ -179,17 +253,49 @@ where core::mem::size_of::() ); - for i in 0..(bit_width 
as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i + if bit_width == 0 { + return; + } + + let mut val = val; + + if bit_width < 64 { + val &= (1u64 << bit_width) - 1; + } + + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - bit_width as usize); + } + + let start_byte = bit_offset / 8; + let bit_shift = bit_offset % 8; + let bytes_needed = (bit_width as usize + bit_shift + 7) / 8; + + val <<= bit_shift; + + let field_mask = if bit_width as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << bit_width) - 1) << bit_shift + }; + + let storage_ptr = + unsafe { core::ptr::addr_of_mut!((*this).storage) as *mut u8 }; + + for i in 0..bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { - i - }; - unsafe { - Self::raw_set_bit(this, index + bit_offset, val_bit_is_set) - }; + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } } } } From 8496a5907c542f4d5c11c78e309e035d5c550c9c Mon Sep 17 00:00:00 2001 From: Jeremy Fitzhardinge Date: Sat, 6 Dec 2025 02:49:36 -0800 Subject: [PATCH 2/2] Add const-generic bitfield accessors for compile-time optimization Add a specialized impl block for `__BindgenBitfieldUnit<[u8; N]>` with const-generic methods: `get_const`, `set_const`, `raw_get_const`, and `raw_set_const`. These methods take bit offset and width as const generic parameters, allowing the compiler to fully optimize bitfield access at compile time. Update codegen to use the new const-generic methods for generated bitfield accessors. Since all bitfield offsets and widths are known at code generation time, this enables better optimization. The original runtime-parameter methods are preserved for backwards compatibility with users who provide custom implementations via the blocklist mechanism. 
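As a rough usage sketch (illustration only, not part of the generated code;
it assumes the existing public `new` constructor on `__BindgenBitfieldUnit`),
a 3-bit field at bit offset 2 of a 2-byte unit would be accessed as:

    // BIT_OFFSET and BIT_WIDTH are const generic arguments, so the shifts
    // and masks constant-fold; this mirrors the generated accessors below.
    let mut unit = __BindgenBitfieldUnit::new([0u8; 2]);
    unit.set_const::<2usize, 3u8>(0b101);
    assert_eq!(unit.get_const::<2usize, 3u8>(), 0b101);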
---
 .../tests/bitfield-32bit-overflow.rs          | 1218 +++++++++-----
 .../expectations/tests/bitfield-large.rs      |  358 +++-
 .../expectations/tests/bitfield-linux-32.rs   |  329 +++-
 .../tests/bitfield-method-same-name.rs        |  303 +++-
 .../expectations/tests/bitfield-template.rs   |  299 +++-
 .../expectations/tests/bitfield_align.rs      | 1054 +++++++-----
 .../expectations/tests/bitfield_align_2.rs    |  329 +++-
 .../tests/bitfield_method_mangling.rs         |  329 +++-
 .../tests/bitfield_pack_offset.rs             |  470 ++++--
 .../tests/bitfield_pragma_packed.rs           |  471 ++++--
 .../tests/blocklist_bitfield_unit.rs          |   58 +-
 .../tests/default_visibility_crate.rs         |  355 +++-
 .../tests/default_visibility_private.rs       |  355 +++-
 ...bility_private_respects_cxx_access_spec.rs |  355 +++-
 .../tests/derive-bitfield-method-same-name.rs |  303 +++-
 .../tests/derive-debug-bitfield-1-51.rs       |  327 +++-
 .../tests/derive-debug-bitfield-core.rs       |  329 +++-
 .../tests/derive-debug-bitfield.rs            |  327 +++-
 .../tests/derive-partialeq-bitfield.rs        |  327 +++-
 .../tests/divide-by-zero-in-struct-layout.rs  |  271 +++
 .../tests/field-visibility-callback.rs        |  329 +++-
 .../expectations/tests/field-visibility.rs    |  329 +++-
 .../tests/incomplete-array-padding.rs         |  299 +++-
 .../tests/expectations/tests/issue-1034.rs    |  271 +++
 .../issue-1076-unnamed-bitfield-alignment.rs  |  271 +++
 .../tests/expectations/tests/issue-1947.rs    |  561 +++++--
 .../tests/issue-739-pointer-wide-bitfield.rs  |  387 ++++-
 .../tests/expectations/tests/issue-743.rs     |  300 +++-
 .../tests/expectations/tests/issue-816.rs     | 1460 ++++++++++-------
 .../expectations/tests/jsval_layout_opaque.rs |  329 +++-
 .../tests/expectations/tests/layout_align.rs  |  360 +++-
 .../expectations/tests/layout_eth_conf.rs     |  650 +++++---
 .../tests/expectations/tests/layout_mbuf.rs   |  658 +++++---
 .../expectations/tests/only_bitfields.rs      |  327 +++-
 .../expectations/tests/packed-bitfield.rs     |  356 +++-
 .../expectations/tests/private_fields.rs      |  534 ++++--
 .../tests/redundant-packed-and-align.rs       |  327 +++-
 .../tests/struct_with_bitfields.rs            |  445 ++++-
 .../tests/expectations/tests/timex.rs         |  469 +++++-
 .../expectations/tests/union_bitfield.rs      |  357 +++-
 .../tests/union_with_anon_struct_bitfield.rs  |  329 +++-
 .../expectations/tests/weird_bitfields.rs     |  491 ++++--
 bindgen/codegen/bitfield_unit.rs              |  366 ++++-
 bindgen/codegen/bitfield_unit_tests.rs        |  109 ++
 bindgen/codegen/mod.rs                        |   18 +-
 45 files changed, 15175 insertions(+), 3324 deletions(-)

diff --git a/bindgen-tests/tests/expectations/tests/bitfield-32bit-overflow.rs b/bindgen-tests/tests/expectations/tests/bitfield-32bit-overflow.rs
index 97e4816a3d..6ec853994a 100644
--- a/bindgen-tests/tests/expectations/tests/bitfield-32bit-overflow.rs
+++ b/bindgen-tests/tests/expectations/tests/bitfield-32bit-overflow.rs
@@ -217,6 +217,277 @@ where
         }
     }
 }
+/// Const-generic methods for efficient bitfield access when offset and width
+/// are known at compile time.
+impl<const N: usize> __BindgenBitfieldUnit<[u8; N]> {
+    /// Get a field using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
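+    /// The bytes covering the field are assembled into a word, shifted down
+    /// by the in-byte offset, and masked to `BIT_WIDTH`; big-endian targets
+    /// bit-reverse each byte and the final value, matching the byte-wise
+    /// runtime accessors above.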
+    #[inline]
+    pub const fn get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&self) -> u64 {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return 0;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = 0usize;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val
+                        |= (self.storage[start_byte + i].reverse_bits() as usize)
+                            << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i] as usize) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val as u64
+        } else {
+            let mut val = 0u64;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val
+                        |= (self.storage[start_byte + i].reverse_bits() as u64)
+                            << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i] as u64) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val
+        }
+    }
+    /// Set a field using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub fn set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&mut self, val: u64) {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = val as usize;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift;
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                if cfg!(target_endian = "big") {
+                    let byte = self.storage[start_byte + i].reverse_bits();
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    self.storage[start_byte + i] = new_byte.reverse_bits();
+                } else {
+                    self.storage[start_byte + i] = (self.storage[start_byte + i]
+                        & !byte_mask) | (byte_val & byte_mask);
+                }
+                i += 1;
+            }
+        } else {
+            let mut val = val;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 {
+                !0u64 << bit_shift
+            } else {
+                ((1u64 << BIT_WIDTH) - 1) << bit_shift
+            };
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                if cfg!(target_endian = "big") {
+                    let byte = self.storage[start_byte + i].reverse_bits();
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    self.storage[start_byte + i] = new_byte.reverse_bits();
+                } else {
+                    self.storage[start_byte + i] = (self.storage[start_byte + i]
+                        & !byte_mask) | (byte_val & byte_mask);
+                }
+                i += 1;
+            }
+        }
+    }
+    /// Raw pointer get using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub const unsafe fn raw_get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        this: *const Self,
+    ) -> u64 {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return 0;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 };
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = 0usize;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte.reverse_bits() as usize) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte as usize) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val as u64
+        } else {
+            let mut val = 0u64;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte.reverse_bits() as u64) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte as u64) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val
+        }
+    }
+    /// Raw pointer set using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
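+    ///
+    /// # Safety
+    ///
+    /// `this` must be non-null, properly aligned, and valid for reads and
+    /// writes of the entire storage array.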
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } + } +} #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct MuchBitfield { @@ -230,13 +501,15 @@ const _: () = { impl MuchBitfield { #[inline] pub fn m0(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u8) + } } #[inline] pub fn set_m0(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -245,7 +518,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) as u8, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -255,23 +531,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m1(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 1u8>() as u8) + } } #[inline] pub fn 
set_m1(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(1usize, 1u8, val as u64) + self._bitfield_1.set_const::<1usize, 1u8>(val as u64) } } #[inline] @@ -280,7 +556,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 1u8) as u8, + >>::raw_get_const::< + 1usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -290,23 +569,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m2(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<2usize, 1u8>() as u8) + } } #[inline] pub fn set_m2(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(2usize, 1u8, val as u64) + self._bitfield_1.set_const::<2usize, 1u8>(val as u64) } } #[inline] @@ -315,7 +594,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 2usize, 1u8) as u8, + >>::raw_get_const::< + 2usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -325,23 +607,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 2usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m3(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(3usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<3usize, 1u8>() as u8) + } } #[inline] pub fn set_m3(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(3usize, 1u8, val as u64) + self._bitfield_1.set_const::<3usize, 1u8>(val as u64) } } #[inline] @@ -350,7 +632,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 3usize, 1u8) as u8, + >>::raw_get_const::< + 3usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -360,23 +645,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 3usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m4(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(4usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<4usize, 1u8>() as u8) + } } #[inline] pub fn set_m4(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(4usize, 1u8, val as u64) + self._bitfield_1.set_const::<4usize, 1u8>(val as u64) } } #[inline] @@ -385,7 +670,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 4usize, 1u8) as u8, + >>::raw_get_const::< + 
4usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -395,23 +683,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 4usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m5(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(5usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<5usize, 1u8>() as u8) + } } #[inline] pub fn set_m5(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(5usize, 1u8, val as u64) + self._bitfield_1.set_const::<5usize, 1u8>(val as u64) } } #[inline] @@ -420,7 +708,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 5usize, 1u8) as u8, + >>::raw_get_const::< + 5usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -430,23 +721,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 5usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m6(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(6usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<6usize, 1u8>() as u8) + } } #[inline] pub fn set_m6(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(6usize, 1u8, val as u64) + self._bitfield_1.set_const::<6usize, 1u8>(val as u64) } } #[inline] @@ -455,7 +746,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 6usize, 1u8) as u8, + >>::raw_get_const::< + 6usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -465,23 +759,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 6usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m7(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(7usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<7usize, 1u8>() as u8) + } } #[inline] pub fn set_m7(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(7usize, 1u8, val as u64) + self._bitfield_1.set_const::<7usize, 1u8>(val as u64) } } #[inline] @@ -490,7 +784,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 7usize, 1u8) as u8, + >>::raw_get_const::< + 7usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -500,23 +797,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 7usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m8(&self) -> 
::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(8usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<8usize, 1u8>() as u8) + } } #[inline] pub fn set_m8(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(8usize, 1u8, val as u64) + self._bitfield_1.set_const::<8usize, 1u8>(val as u64) } } #[inline] @@ -525,7 +822,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 8usize, 1u8) as u8, + >>::raw_get_const::< + 8usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -535,23 +835,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 8usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m9(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(9usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<9usize, 1u8>() as u8) + } } #[inline] pub fn set_m9(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(9usize, 1u8, val as u64) + self._bitfield_1.set_const::<9usize, 1u8>(val as u64) } } #[inline] @@ -560,7 +860,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 9usize, 1u8) as u8, + >>::raw_get_const::< + 9usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -570,23 +873,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 9usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m10(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(10usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<10usize, 1u8>() as u8) + } } #[inline] pub fn set_m10(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(10usize, 1u8, val as u64) + self._bitfield_1.set_const::<10usize, 1u8>(val as u64) } } #[inline] @@ -595,8 +898,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 10usize, 1u8) - as u8, + >>::raw_get_const::< + 10usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -606,23 +911,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 10usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m11(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(11usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<11usize, 1u8>() as u8) + } } #[inline] pub fn set_m11(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(11usize, 1u8, val as u64) + self._bitfield_1.set_const::<11usize, 1u8>(val 
as u64) } } #[inline] @@ -631,8 +936,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 11usize, 1u8) - as u8, + >>::raw_get_const::< + 11usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -642,23 +949,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 11usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m12(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(12usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<12usize, 1u8>() as u8) + } } #[inline] pub fn set_m12(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(12usize, 1u8, val as u64) + self._bitfield_1.set_const::<12usize, 1u8>(val as u64) } } #[inline] @@ -667,8 +974,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 12usize, 1u8) - as u8, + >>::raw_get_const::< + 12usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -678,23 +987,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 12usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m13(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(13usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<13usize, 1u8>() as u8) + } } #[inline] pub fn set_m13(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(13usize, 1u8, val as u64) + self._bitfield_1.set_const::<13usize, 1u8>(val as u64) } } #[inline] @@ -703,8 +1012,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 13usize, 1u8) - as u8, + >>::raw_get_const::< + 13usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -714,23 +1025,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 13usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m14(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(14usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<14usize, 1u8>() as u8) + } } #[inline] pub fn set_m14(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(14usize, 1u8, val as u64) + self._bitfield_1.set_const::<14usize, 1u8>(val as u64) } } #[inline] @@ -739,8 +1050,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 14usize, 1u8) - as u8, + >>::raw_get_const::< + 14usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -750,23 +1063,23 @@ impl MuchBitfield { let val: u8 = 
::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 14usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m15(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(15usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<15usize, 1u8>() as u8) + } } #[inline] pub fn set_m15(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(15usize, 1u8, val as u64) + self._bitfield_1.set_const::<15usize, 1u8>(val as u64) } } #[inline] @@ -775,8 +1088,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 15usize, 1u8) - as u8, + >>::raw_get_const::< + 15usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -786,23 +1101,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 15usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m16(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(16usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<16usize, 1u8>() as u8) + } } #[inline] pub fn set_m16(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(16usize, 1u8, val as u64) + self._bitfield_1.set_const::<16usize, 1u8>(val as u64) } } #[inline] @@ -811,8 +1126,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 16usize, 1u8) - as u8, + >>::raw_get_const::< + 16usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -822,23 +1139,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 16usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m17(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(17usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<17usize, 1u8>() as u8) + } } #[inline] pub fn set_m17(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(17usize, 1u8, val as u64) + self._bitfield_1.set_const::<17usize, 1u8>(val as u64) } } #[inline] @@ -847,8 +1164,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 17usize, 1u8) - as u8, + >>::raw_get_const::< + 17usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -858,23 +1177,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 17usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m18(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(18usize, 1u8) as u8) 
} + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<18usize, 1u8>() as u8) + } } #[inline] pub fn set_m18(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(18usize, 1u8, val as u64) + self._bitfield_1.set_const::<18usize, 1u8>(val as u64) } } #[inline] @@ -883,8 +1202,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 18usize, 1u8) - as u8, + >>::raw_get_const::< + 18usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -894,23 +1215,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 18usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m19(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(19usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<19usize, 1u8>() as u8) + } } #[inline] pub fn set_m19(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(19usize, 1u8, val as u64) + self._bitfield_1.set_const::<19usize, 1u8>(val as u64) } } #[inline] @@ -919,8 +1240,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 19usize, 1u8) - as u8, + >>::raw_get_const::< + 19usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -930,23 +1253,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 19usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m20(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(20usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<20usize, 1u8>() as u8) + } } #[inline] pub fn set_m20(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(20usize, 1u8, val as u64) + self._bitfield_1.set_const::<20usize, 1u8>(val as u64) } } #[inline] @@ -955,8 +1278,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 20usize, 1u8) - as u8, + >>::raw_get_const::< + 20usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -966,23 +1291,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 20usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m21(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(21usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<21usize, 1u8>() as u8) + } } #[inline] pub fn set_m21(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(21usize, 1u8, val as u64) + self._bitfield_1.set_const::<21usize, 1u8>(val as u64) } } #[inline] @@ -991,8 +1316,10 @@ impl MuchBitfield { 
::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 21usize, 1u8) - as u8, + >>::raw_get_const::< + 21usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -1002,23 +1329,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 21usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m22(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(22usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<22usize, 1u8>() as u8) + } } #[inline] pub fn set_m22(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(22usize, 1u8, val as u64) + self._bitfield_1.set_const::<22usize, 1u8>(val as u64) } } #[inline] @@ -1027,8 +1354,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 22usize, 1u8) - as u8, + >>::raw_get_const::< + 22usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -1038,23 +1367,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 22usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m23(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(23usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<23usize, 1u8>() as u8) + } } #[inline] pub fn set_m23(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(23usize, 1u8, val as u64) + self._bitfield_1.set_const::<23usize, 1u8>(val as u64) } } #[inline] @@ -1063,8 +1392,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 23usize, 1u8) - as u8, + >>::raw_get_const::< + 23usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -1074,23 +1405,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 23usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m24(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(24usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<24usize, 1u8>() as u8) + } } #[inline] pub fn set_m24(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(24usize, 1u8, val as u64) + self._bitfield_1.set_const::<24usize, 1u8>(val as u64) } } #[inline] @@ -1099,8 +1430,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 24usize, 1u8) - as u8, + >>::raw_get_const::< + 24usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -1110,23 +1443,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - 
>>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 24usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m25(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(25usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<25usize, 1u8>() as u8) + } } #[inline] pub fn set_m25(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(25usize, 1u8, val as u64) + self._bitfield_1.set_const::<25usize, 1u8>(val as u64) } } #[inline] @@ -1135,8 +1468,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 25usize, 1u8) - as u8, + >>::raw_get_const::< + 25usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -1146,23 +1481,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 25usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m26(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(26usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<26usize, 1u8>() as u8) + } } #[inline] pub fn set_m26(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(26usize, 1u8, val as u64) + self._bitfield_1.set_const::<26usize, 1u8>(val as u64) } } #[inline] @@ -1171,8 +1506,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 26usize, 1u8) - as u8, + >>::raw_get_const::< + 26usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -1182,23 +1519,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 26usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m27(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(27usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<27usize, 1u8>() as u8) + } } #[inline] pub fn set_m27(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(27usize, 1u8, val as u64) + self._bitfield_1.set_const::<27usize, 1u8>(val as u64) } } #[inline] @@ -1207,8 +1544,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 27usize, 1u8) - as u8, + >>::raw_get_const::< + 27usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -1218,23 +1557,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 27usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m28(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(28usize, 1u8) as u8) } + unsafe { + 
::std::mem::transmute(self._bitfield_1.get_const::<28usize, 1u8>() as u8) + } } #[inline] pub fn set_m28(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(28usize, 1u8, val as u64) + self._bitfield_1.set_const::<28usize, 1u8>(val as u64) } } #[inline] @@ -1243,8 +1582,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 28usize, 1u8) - as u8, + >>::raw_get_const::< + 28usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -1254,23 +1595,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 28usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m29(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(29usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<29usize, 1u8>() as u8) + } } #[inline] pub fn set_m29(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(29usize, 1u8, val as u64) + self._bitfield_1.set_const::<29usize, 1u8>(val as u64) } } #[inline] @@ -1279,8 +1620,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 29usize, 1u8) - as u8, + >>::raw_get_const::< + 29usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -1290,23 +1633,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 29usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m30(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(30usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<30usize, 1u8>() as u8) + } } #[inline] pub fn set_m30(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(30usize, 1u8, val as u64) + self._bitfield_1.set_const::<30usize, 1u8>(val as u64) } } #[inline] @@ -1315,8 +1658,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 30usize, 1u8) - as u8, + >>::raw_get_const::< + 30usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -1326,23 +1671,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 30usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m31(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(31usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<31usize, 1u8>() as u8) + } } #[inline] pub fn set_m31(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(31usize, 1u8, val as u64) + self._bitfield_1.set_const::<31usize, 1u8>(val as u64) } } #[inline] @@ -1351,8 +1696,10 @@ impl MuchBitfield { 
::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 31usize, 1u8) - as u8, + >>::raw_get_const::< + 31usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -1362,23 +1709,23 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 31usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn m32(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(32usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<32usize, 1u8>() as u8) + } } #[inline] pub fn set_m32(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(32usize, 1u8, val as u64) + self._bitfield_1.set_const::<32usize, 1u8>(val as u64) } } #[inline] @@ -1387,8 +1734,10 @@ impl MuchBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 32usize, 1u8) - as u8, + >>::raw_get_const::< + 32usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -1398,12 +1747,10 @@ impl MuchBitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 5usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 32usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -1444,302 +1791,269 @@ impl MuchBitfield { ) -> __BindgenBitfieldUnit<[u8; 5usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 5usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let m0: u8 = unsafe { ::std::mem::transmute(m0) }; - m0 as u64 - }, - ); + >({ + let m0: u8 = unsafe { ::std::mem::transmute(m0) }; + m0 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 1u8, - { - let m1: u8 = unsafe { ::std::mem::transmute(m1) }; - m1 as u64 - }, - ); + >({ + let m1: u8 = unsafe { ::std::mem::transmute(m1) }; + m1 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 2usize, 1u8, - { - let m2: u8 = unsafe { ::std::mem::transmute(m2) }; - m2 as u64 - }, - ); + >({ + let m2: u8 = unsafe { ::std::mem::transmute(m2) }; + m2 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 3usize, 1u8, - { - let m3: u8 = unsafe { ::std::mem::transmute(m3) }; - m3 as u64 - }, - ); + >({ + let m3: u8 = unsafe { ::std::mem::transmute(m3) }; + m3 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 4usize, 1u8, - { - let m4: u8 = unsafe { ::std::mem::transmute(m4) }; - m4 as u64 - }, - ); + >({ + let m4: u8 = unsafe { ::std::mem::transmute(m4) }; + m4 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 5usize, 1u8, - { - let m5: u8 = unsafe { ::std::mem::transmute(m5) }; - m5 as u64 - }, - ); + >({ + let m5: u8 = unsafe { ::std::mem::transmute(m5) }; + m5 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 6usize, 1u8, - { - let m6: u8 = unsafe { ::std::mem::transmute(m6) }; - m6 as u64 - }, - ); + >({ + let m6: u8 = unsafe { ::std::mem::transmute(m6) }; + m6 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 7usize, 1u8, - { - let m7: u8 = unsafe { ::std::mem::transmute(m7) }; - m7 as u64 - }, - ); + >({ + let m7: u8 = unsafe { ::std::mem::transmute(m7) }; + m7 as u64 + }); 
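+ // Each call below packs one field: set_const::<OFFSET, WIDTH> masks its argument to WIDTH bits and writes only those bits, so fields already packed are left intact.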
__bindgen_bitfield_unit - .set( + .set_const::< 8usize, 1u8, - { - let m8: u8 = unsafe { ::std::mem::transmute(m8) }; - m8 as u64 - }, - ); + >({ + let m8: u8 = unsafe { ::std::mem::transmute(m8) }; + m8 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 9usize, 1u8, - { - let m9: u8 = unsafe { ::std::mem::transmute(m9) }; - m9 as u64 - }, - ); + >({ + let m9: u8 = unsafe { ::std::mem::transmute(m9) }; + m9 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 10usize, 1u8, - { - let m10: u8 = unsafe { ::std::mem::transmute(m10) }; - m10 as u64 - }, - ); + >({ + let m10: u8 = unsafe { ::std::mem::transmute(m10) }; + m10 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 11usize, 1u8, - { - let m11: u8 = unsafe { ::std::mem::transmute(m11) }; - m11 as u64 - }, - ); + >({ + let m11: u8 = unsafe { ::std::mem::transmute(m11) }; + m11 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 12usize, 1u8, - { - let m12: u8 = unsafe { ::std::mem::transmute(m12) }; - m12 as u64 - }, - ); + >({ + let m12: u8 = unsafe { ::std::mem::transmute(m12) }; + m12 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 13usize, 1u8, - { - let m13: u8 = unsafe { ::std::mem::transmute(m13) }; - m13 as u64 - }, - ); + >({ + let m13: u8 = unsafe { ::std::mem::transmute(m13) }; + m13 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 14usize, 1u8, - { - let m14: u8 = unsafe { ::std::mem::transmute(m14) }; - m14 as u64 - }, - ); + >({ + let m14: u8 = unsafe { ::std::mem::transmute(m14) }; + m14 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 15usize, 1u8, - { - let m15: u8 = unsafe { ::std::mem::transmute(m15) }; - m15 as u64 - }, - ); + >({ + let m15: u8 = unsafe { ::std::mem::transmute(m15) }; + m15 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 16usize, 1u8, - { - let m16: u8 = unsafe { ::std::mem::transmute(m16) }; - m16 as u64 - }, - ); + >({ + let m16: u8 = unsafe { ::std::mem::transmute(m16) }; + m16 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 17usize, 1u8, - { - let m17: u8 = unsafe { ::std::mem::transmute(m17) }; - m17 as u64 - }, - ); + >({ + let m17: u8 = unsafe { ::std::mem::transmute(m17) }; + m17 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 18usize, 1u8, - { - let m18: u8 = unsafe { ::std::mem::transmute(m18) }; - m18 as u64 - }, - ); + >({ + let m18: u8 = unsafe { ::std::mem::transmute(m18) }; + m18 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 19usize, 1u8, - { - let m19: u8 = unsafe { ::std::mem::transmute(m19) }; - m19 as u64 - }, - ); + >({ + let m19: u8 = unsafe { ::std::mem::transmute(m19) }; + m19 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 20usize, 1u8, - { - let m20: u8 = unsafe { ::std::mem::transmute(m20) }; - m20 as u64 - }, - ); + >({ + let m20: u8 = unsafe { ::std::mem::transmute(m20) }; + m20 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 21usize, 1u8, - { - let m21: u8 = unsafe { ::std::mem::transmute(m21) }; - m21 as u64 - }, - ); + >({ + let m21: u8 = unsafe { ::std::mem::transmute(m21) }; + m21 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 22usize, 1u8, - { - let m22: u8 = unsafe { ::std::mem::transmute(m22) }; - m22 as u64 - }, - ); + >({ + let m22: u8 = unsafe { ::std::mem::transmute(m22) }; + m22 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 23usize, 1u8, - { - let m23: u8 = unsafe { ::std::mem::transmute(m23) }; - m23 as u64 - }, - ); + >({ + let m23: u8 = unsafe { ::std::mem::transmute(m23) }; + 
m23 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 24usize, 1u8, - { - let m24: u8 = unsafe { ::std::mem::transmute(m24) }; - m24 as u64 - }, - ); + >({ + let m24: u8 = unsafe { ::std::mem::transmute(m24) }; + m24 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 25usize, 1u8, - { - let m25: u8 = unsafe { ::std::mem::transmute(m25) }; - m25 as u64 - }, - ); + >({ + let m25: u8 = unsafe { ::std::mem::transmute(m25) }; + m25 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 26usize, 1u8, - { - let m26: u8 = unsafe { ::std::mem::transmute(m26) }; - m26 as u64 - }, - ); + >({ + let m26: u8 = unsafe { ::std::mem::transmute(m26) }; + m26 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 27usize, 1u8, - { - let m27: u8 = unsafe { ::std::mem::transmute(m27) }; - m27 as u64 - }, - ); + >({ + let m27: u8 = unsafe { ::std::mem::transmute(m27) }; + m27 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 28usize, 1u8, - { - let m28: u8 = unsafe { ::std::mem::transmute(m28) }; - m28 as u64 - }, - ); + >({ + let m28: u8 = unsafe { ::std::mem::transmute(m28) }; + m28 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 29usize, 1u8, - { - let m29: u8 = unsafe { ::std::mem::transmute(m29) }; - m29 as u64 - }, - ); + >({ + let m29: u8 = unsafe { ::std::mem::transmute(m29) }; + m29 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 30usize, 1u8, - { - let m30: u8 = unsafe { ::std::mem::transmute(m30) }; - m30 as u64 - }, - ); + >({ + let m30: u8 = unsafe { ::std::mem::transmute(m30) }; + m30 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 31usize, 1u8, - { - let m31: u8 = unsafe { ::std::mem::transmute(m31) }; - m31 as u64 - }, - ); + >({ + let m31: u8 = unsafe { ::std::mem::transmute(m31) }; + m31 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 32usize, 1u8, - { - let m32: u8 = unsafe { ::std::mem::transmute(m32) }; - m32 as u64 - }, - ); + >({ + let m32: u8 = unsafe { ::std::mem::transmute(m32) }; + m32 as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/bitfield-large.rs b/bindgen-tests/tests/expectations/tests/bitfield-large.rs index 45d8198c12..db4fd63334 100644 --- a/bindgen-tests/tests/expectations/tests/bitfield-large.rs +++ b/bindgen-tests/tests/expectations/tests/bitfield-large.rs @@ -217,6 +217,277 @@ where } } } +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl<const N: usize> __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize.
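+ ///
+ /// Illustrative sketch of a call site (hypothetical unit and values, shown only for exposition; real call sites are generated below):
+ ///
+ /// ```ignore
+ /// let unit = __BindgenBitfieldUnit::<[u8; 2]>::new([0b0110_0001, 0b0000_0011]);
+ /// // On a little-endian target the bytes are assembled least-significant
+ /// // first, shifted right by the bit offset, then masked to the field width.
+ /// assert_eq!(unit.get_const::<5usize, 3u8>(), 0b011);
+ /// ```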
+ #[inline] + pub const fn get_const< + const BIT_OFFSET: usize, + const BIT_WIDTH: u8, + >(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const< + const BIT_OFFSET: usize, + const BIT_WIDTH: u8, + >(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) |
(byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const< + const BIT_OFFSET: usize, + const BIT_WIDTH: u8, + >( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize.
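+ ///
+ /// Illustrative sketch (hypothetical values): writing through a raw pointer, which avoids creating a reference to a shared or packed unit:
+ ///
+ /// ```ignore
+ /// let mut unit = __BindgenBitfieldUnit::<[u8; 2]>::new([0u8; 2]);
+ /// let p: *mut _ = &mut unit;
+ /// // Store the 3-bit value 0b101 at bit offset 5; all other bits are preserved.
+ /// unsafe { __BindgenBitfieldUnit::<[u8; 2]>::raw_set_const::<5usize, 3u8>(p, 0b101) };
+ /// assert_eq!(unit.get_const::<5usize, 3u8>(), 0b101);
+ /// ```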
+ #[inline] + pub unsafe fn raw_set_const< + const BIT_OFFSET: usize, + const BIT_WIDTH: u8, + >( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::<u8>(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } + } +} #[repr(C)] #[repr(align(16))] #[derive(Debug, Default, Copy, Clone)] pub struct HasBigBitfield { pub _bitfield_align_1: [u64; 0], pub _bitfield_1: __BindgenBitfieldUnit<[u8; 16usize]>, } @@ -231,13 +502,15 @@ const _: () = { impl HasBigBitfield { #[inline] pub fn x(&self) -> i128 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 128u8) as u128) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 128u8>() as u128) + } } #[inline] pub fn set_x(&mut self, val: i128) { unsafe { let val: u128 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 128u8, val as u64) + self._bitfield_1.set_const::<0usize, 128u8>(val as u64) } } #[inline] @@ -246,8 +519,10 @@ impl HasBigBitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 128u8) - as u128, + >>::raw_get_const::< + 0usize, + 128u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u128, ) } } @@ -257,26 +532,23 @@ impl HasBigBitfield { let val: u128 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 128u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn new_bitfield_1(x: i128) -> __BindgenBitfieldUnit<[u8; 16usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 16usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 128u8, - { - let x: u128 =
unsafe { ::std::mem::transmute(x) }; - x as u64 - }, - ); + >({ + let x: u128 = unsafe { ::std::mem::transmute(x) }; + x as u64 + }); __bindgen_bitfield_unit } } @@ -298,13 +570,15 @@ const _: () = { impl HasTwoBigBitfields { #[inline] pub fn x(&self) -> i128 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 80u8) as u128) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 80u8>() as u128) + } } #[inline] pub fn set_x(&mut self, val: i128) { unsafe { let val: u128 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 80u8, val as u64) + self._bitfield_1.set_const::<0usize, 80u8>(val as u64) } } #[inline] @@ -313,8 +587,10 @@ impl HasTwoBigBitfields { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 80u8) - as u128, + >>::raw_get_const::< + 0usize, + 80u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u128, ) } } @@ -324,23 +600,23 @@ impl HasTwoBigBitfields { let val: u128 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 80u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn y(&self) -> i128 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(80usize, 48u8) as u128) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<80usize, 48u8>() as u128) + } } #[inline] pub fn set_y(&mut self, val: i128) { unsafe { let val: u128 = ::std::mem::transmute(val); - self._bitfield_1.set(80usize, 48u8, val as u64) + self._bitfield_1.set_const::<80usize, 48u8>(val as u64) } } #[inline] @@ -349,8 +625,10 @@ impl HasTwoBigBitfields { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 80usize, 48u8) - as u128, + >>::raw_get_const::< + 80usize, + 48u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u128, ) } } @@ -360,35 +638,31 @@ impl HasTwoBigBitfields { let val: u128 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 80usize, 48u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn new_bitfield_1(x: i128, y: i128) -> __BindgenBitfieldUnit<[u8; 16usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 16usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 80u8, - { - let x: u128 = unsafe { ::std::mem::transmute(x) }; - x as u64 - }, - ); + >({ + let x: u128 = unsafe { ::std::mem::transmute(x) }; + x as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 80usize, 48u8, - { - let y: u128 = unsafe { ::std::mem::transmute(y) }; - y as u64 - }, - ); + >({ + let y: u128 = unsafe { ::std::mem::transmute(y) }; + y as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/bitfield-linux-32.rs b/bindgen-tests/tests/expectations/tests/bitfield-linux-32.rs index 9fd3b3d7a5..8bac523f99 100644 --- a/bindgen-tests/tests/expectations/tests/bitfield-linux-32.rs +++ b/bindgen-tests/tests/expectations/tests/bitfield-linux-32.rs @@ -217,6 +217,277 @@ where } } } +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl<const N: usize> __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization.
+ /// Uses native word size operations when the field fits in usize. + #[inline] + pub const fn get_const< + const BIT_OFFSET: usize, + const BIT_WIDTH: u8, + >(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const< + const BIT_OFFSET: usize, + const BIT_WIDTH: u8, + >(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else {
self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const< + const BIT_OFFSET: usize, + const BIT_WIDTH: u8, + >( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize.
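+ ///
+ /// Illustrative sketch (hypothetical values, mirroring the 56-bit field of `Test` below): a set/get round-trip through the raw accessors; on a 64-bit target a 56-bit field takes the native usize path, while a 32-bit target falls back to the u64 path:
+ ///
+ /// ```ignore
+ /// let mut unit = __BindgenBitfieldUnit::<[u8; 8]>::new([0u8; 8]);
+ /// let p: *mut _ = &mut unit;
+ /// unsafe { __BindgenBitfieldUnit::<[u8; 8]>::raw_set_const::<0usize, 56u8>(p, 0xAB_CDEF_0123_4567) };
+ /// let v = unsafe { __BindgenBitfieldUnit::<[u8; 8]>::raw_get_const::<0usize, 56u8>(p as *const _) };
+ /// assert_eq!(v, 0xAB_CDEF_0123_4567);
+ /// ```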
+ #[inline] + pub unsafe fn raw_set_const< + const BIT_OFFSET: usize, + const BIT_WIDTH: u8, + >( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::<u8>(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } + } +} #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct Test { @@ -226,13 +497,15 @@ pub struct Test { impl Test { #[inline] pub fn x(&self) -> u64 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 56u8) as u64) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 56u8>() as u64) + } } #[inline] pub fn set_x(&mut self, val: u64) { unsafe { let val: u64 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 56u8, val as u64) + self._bitfield_1.set_const::<0usize, 56u8>(val as u64) } } #[inline] @@ -241,8 +514,10 @@ impl Test { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 8usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 56u8) - as u64, + >>::raw_get_const::< + 0usize, + 56u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u64, ) } } @@ -252,23 +527,23 @@ impl Test { let val: u64 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 8usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 56u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn y(&self) -> u64 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(56usize, 8u8) as u64) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<56usize, 8u8>() as u64) + } } #[inline] pub fn set_y(&mut self, val: u64) { unsafe { let val: u64 = ::std::mem::transmute(val);
 #[repr(C)]
 #[derive(Debug, Default, Copy, Clone)]
 pub struct Test {
@@ -226,13 +497,15 @@ pub struct Test {
 impl Test {
     #[inline]
     pub fn x(&self) -> u64 {
-        unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 56u8) as u64) }
+        unsafe {
+            ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 56u8>() as u64)
+        }
     }
     #[inline]
     pub fn set_x(&mut self, val: u64) {
         unsafe {
             let val: u64 = ::std::mem::transmute(val);
-            self._bitfield_1.set(0usize, 56u8, val as u64)
+            self._bitfield_1.set_const::<0usize, 56u8>(val as u64)
         }
     }
     #[inline]
@@ -241,8 +514,10 @@ impl Test {
             ::std::mem::transmute(
                 <__BindgenBitfieldUnit<
                     [u8; 8usize],
-                >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 56u8)
-                    as u64,
+                >>::raw_get_const::<
+                    0usize,
+                    56u8,
+                >(::std::ptr::addr_of!((*this)._bitfield_1)) as u64,
             )
         }
     }
@@ -252,23 +527,23 @@ impl Test {
             let val: u64 = ::std::mem::transmute(val);
             <__BindgenBitfieldUnit<
                 [u8; 8usize],
-            >>::raw_set(
-                ::std::ptr::addr_of_mut!((*this)._bitfield_1),
+            >>::raw_set_const::<
                 0usize,
                 56u8,
-                val as u64,
-            )
+            >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64)
         }
     }
     #[inline]
     pub fn y(&self) -> u64 {
-        unsafe { ::std::mem::transmute(self._bitfield_1.get(56usize, 8u8) as u64) }
+        unsafe {
+            ::std::mem::transmute(self._bitfield_1.get_const::<56usize, 8u8>() as u64)
+        }
     }
     #[inline]
     pub fn set_y(&mut self, val: u64) {
         unsafe {
             let val: u64 = ::std::mem::transmute(val);
-            self._bitfield_1.set(56usize, 8u8, val as u64)
+            self._bitfield_1.set_const::<56usize, 8u8>(val as u64)
         }
     }
     #[inline]
@@ -277,8 +552,10 @@ impl Test {
             ::std::mem::transmute(
                 <__BindgenBitfieldUnit<
                     [u8; 8usize],
-                >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 56usize, 8u8)
-                    as u64,
+                >>::raw_get_const::<
+                    56usize,
+                    8u8,
+                >(::std::ptr::addr_of!((*this)._bitfield_1)) as u64,
             )
         }
     }
@@ -288,35 +565,31 @@ impl Test {
             let val: u64 = ::std::mem::transmute(val);
             <__BindgenBitfieldUnit<
                 [u8; 8usize],
-            >>::raw_set(
-                ::std::ptr::addr_of_mut!((*this)._bitfield_1),
+            >>::raw_set_const::<
                 56usize,
                 8u8,
-                val as u64,
-            )
+            >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64)
         }
     }
     #[inline]
     pub fn new_bitfield_1(x: u64, y: u64) -> __BindgenBitfieldUnit<[u8; 8usize]> {
         let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default();
         __bindgen_bitfield_unit
-            .set(
+            .set_const::<
                 0usize,
                 56u8,
-                {
-                    let x: u64 = unsafe { ::std::mem::transmute(x) };
-                    x as u64
-                },
-            );
+            >({
+                let x: u64 = unsafe { ::std::mem::transmute(x) };
+                x as u64
+            });
         __bindgen_bitfield_unit
-            .set(
+            .set_const::<
                 56usize,
                 8u8,
-                {
-                    let y: u64 = unsafe { ::std::mem::transmute(y) };
-                    y as u64
-                },
-            );
+            >({
+                let y: u64 = unsafe { ::std::mem::transmute(y) };
+                y as u64
+            });
         __bindgen_bitfield_unit
     }
 }
diff --git a/bindgen-tests/tests/expectations/tests/bitfield-method-same-name.rs b/bindgen-tests/tests/expectations/tests/bitfield-method-same-name.rs
index 737b10fbc9..70fc98db86 100644
--- a/bindgen-tests/tests/expectations/tests/bitfield-method-same-name.rs
+++ b/bindgen-tests/tests/expectations/tests/bitfield-method-same-name.rs
@@ -217,6 +217,277 @@ where
         }
     }
 }
+/// Const-generic methods for efficient bitfield access when offset and width
+/// are known at compile time.
+impl<const N: usize> __BindgenBitfieldUnit<[u8; N]> {
+    /// Get a field using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub const fn get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        &self,
+    ) -> u64 {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return 0;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = 0usize;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val
+                        |= (self.storage[start_byte + i].reverse_bits() as usize)
+                            << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i] as usize) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val as u64
+        } else {
+            let mut val = 0u64;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val
+                        |= (self.storage[start_byte + i].reverse_bits() as u64)
+                            << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i] as u64) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val
+        }
+    }
+    /// Set a field using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub fn set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&mut self, val: u64) {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = val as usize;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift;
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                if cfg!(target_endian = "big") {
+                    let byte = self.storage[start_byte + i].reverse_bits();
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    self.storage[start_byte + i] = new_byte.reverse_bits();
+                } else {
+                    self.storage[start_byte + i] = (self.storage[start_byte + i]
+                        & !byte_mask) | (byte_val & byte_mask);
+                }
+                i += 1;
+            }
+        } else {
+            let mut val = val;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 {
+                !0u64 << bit_shift
+            } else {
+                ((1u64 << BIT_WIDTH) - 1) << bit_shift
+            };
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                if cfg!(target_endian = "big") {
+                    let byte = self.storage[start_byte + i].reverse_bits();
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    self.storage[start_byte + i] = new_byte.reverse_bits();
+                } else {
+                    self.storage[start_byte + i] = (self.storage[start_byte + i]
+                        & !byte_mask) | (byte_val & byte_mask);
+                }
+                i += 1;
+            }
+        }
+    }
+    /// Raw pointer get using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub const unsafe fn raw_get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        this: *const Self,
+    ) -> u64 {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return 0;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 };
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = 0usize;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte.reverse_bits() as usize) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte as usize) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val as u64
+        } else {
+            let mut val = 0u64;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte.reverse_bits() as u64) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte as u64) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val
+        }
+    }
+    /// Raw pointer set using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub unsafe fn raw_set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        this: *mut Self,
+        val: u64,
+    ) {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        let storage_ptr = this.cast::<[u8; N]>().cast::<u8>();
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = val as usize;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift;
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                let byte_ptr = unsafe { storage_ptr.add(start_byte + i) };
+                if cfg!(target_endian = "big") {
+                    let byte = unsafe { (*byte_ptr).reverse_bits() };
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    unsafe { *byte_ptr = new_byte.reverse_bits() };
+                } else {
+                    unsafe {
+                        *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask)
+                    };
+                }
+                i += 1;
+            }
+        } else {
+            let mut val = val;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 {
+                !0u64 << bit_shift
+            } else {
+                ((1u64 << BIT_WIDTH) - 1) << bit_shift
+            };
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                let byte_ptr = unsafe { storage_ptr.add(start_byte + i) };
+                if cfg!(target_endian = "big") {
+                    let byte = unsafe { (*byte_ptr).reverse_bits() };
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    unsafe { *byte_ptr = new_byte.reverse_bits() };
+                } else {
+                    unsafe {
+                        *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask)
+                    };
+                }
+                i += 1;
+            }
+        }
+    }
+}
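
A quick round-trip exercise of the new API as it appears in this diff, sized to match `Foo`'s 3-bit field below and relying on the `Default` impl the generated code already uses (values arbitrary, for illustration only):

    let mut unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default();
    unit.set_const::<0usize, 3u8>(0b101);
    assert_eq!(unit.get_const::<0usize, 3u8>(), 0b101);
    // Writes wider than the field are truncated to BIT_WIDTH bits:
    unit.set_const::<0usize, 3u8>(0xFF);
    assert_eq!(unit.get_const::<0usize, 3u8>(), 0b111);
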
 #[repr(C)]
 #[derive(Debug, Default, Copy, Clone)]
 pub struct Foo {
@@ -242,13 +513,15 @@ unsafe extern "C" {
 impl Foo {
     #[inline]
     pub fn type__bindgen_bitfield(&self) -> ::std::os::raw::c_char {
-        unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 3u8) as u8) }
+        unsafe {
+            ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 3u8>() as u8)
+        }
     }
     #[inline]
     pub fn set_type__bindgen_bitfield(&mut self, val: ::std::os::raw::c_char) {
         unsafe {
             let val: u8 = ::std::mem::transmute(val);
-            self._bitfield_1.set(0usize, 3u8, val as u64)
+            self._bitfield_1.set_const::<0usize, 3u8>(val as u64)
         }
     }
     #[inline]
@@ -259,7 +532,10 @@ impl Foo {
             ::std::mem::transmute(
                 <__BindgenBitfieldUnit<
                     [u8; 1usize],
-                >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 3u8) as u8,
+                >>::raw_get_const::<
+                    0usize,
+                    3u8,
+                >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8,
             )
         }
     }
@@ -272,12 +548,10 @@ impl Foo {
             let val: u8 = ::std::mem::transmute(val);
             <__BindgenBitfieldUnit<
                 [u8; 1usize],
-            >>::raw_set(
-                ::std::ptr::addr_of_mut!((*this)._bitfield_1),
+            >>::raw_set_const::<
                 0usize,
                 3u8,
-                val as u64,
-            )
+            >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64)
         }
     }
     #[inline]
@@ -286,16 +560,15 @@ impl Foo {
     ) -> __BindgenBitfieldUnit<[u8; 1usize]> {
         let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default();
         __bindgen_bitfield_unit
-            .set(
+            .set_const::<
                 0usize,
                 3u8,
-                {
-                    let type__bindgen_bitfield: u8 = unsafe {
-                        ::std::mem::transmute(type__bindgen_bitfield)
-                    };
-                    type__bindgen_bitfield as u64
-                },
-            );
+            >({
+                let type__bindgen_bitfield: u8 = unsafe {
+                    ::std::mem::transmute(type__bindgen_bitfield)
+                };
+                type__bindgen_bitfield as u64
+            });
         __bindgen_bitfield_unit
     }
     #[inline]
diff --git a/bindgen-tests/tests/expectations/tests/bitfield-template.rs b/bindgen-tests/tests/expectations/tests/bitfield-template.rs
index 9639db7cbb..5678344882 100644
--- a/bindgen-tests/tests/expectations/tests/bitfield-template.rs
+++ b/bindgen-tests/tests/expectations/tests/bitfield-template.rs
@@ -217,6 +217,277 @@ where
         }
     }
 }
+/// Const-generic methods for efficient bitfield access when offset and width
+/// are known at compile time.
+impl<const N: usize> __BindgenBitfieldUnit<[u8; N]> {
+    /// Get a field using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub const fn get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        &self,
+    ) -> u64 {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return 0;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = 0usize;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val
+                        |= (self.storage[start_byte + i].reverse_bits() as usize)
+                            << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i] as usize) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val as u64
+        } else {
+            let mut val = 0u64;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val
+                        |= (self.storage[start_byte + i].reverse_bits() as u64)
+                            << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i] as u64) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val
+        }
+    }
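
One subtlety in `get_const` worth flagging: on big-endian targets each byte is loaded via `reverse_bits` so the little-endian shift/mask arithmetic can be shared, and the final `val.reverse_bits() >> (64 - BIT_WIDTH)` puts the field bits back in order. The finalization step in isolation (width 4, arbitrary bits; illustrative only):

    let val: u64 = 0b0011;
    // reverse all 64 bits, then keep the top 4: the low 4 bits come out reversed
    let out = val.reverse_bits() >> (64 - 4);
    assert_eq!(out, 0b1100);
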
+    /// Set a field using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub fn set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&mut self, val: u64) {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = val as usize;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift;
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                if cfg!(target_endian = "big") {
+                    let byte = self.storage[start_byte + i].reverse_bits();
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    self.storage[start_byte + i] = new_byte.reverse_bits();
+                } else {
+                    self.storage[start_byte + i] = (self.storage[start_byte + i]
+                        & !byte_mask) | (byte_val & byte_mask);
+                }
+                i += 1;
+            }
+        } else {
+            let mut val = val;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 {
+                !0u64 << bit_shift
+            } else {
+                ((1u64 << BIT_WIDTH) - 1) << bit_shift
+            };
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                if cfg!(target_endian = "big") {
+                    let byte = self.storage[start_byte + i].reverse_bits();
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    self.storage[start_byte + i] = new_byte.reverse_bits();
+                } else {
+                    self.storage[start_byte + i] = (self.storage[start_byte + i]
+                        & !byte_mask) | (byte_val & byte_mask);
+                }
+                i += 1;
+            }
+        }
+    }
+    /// Raw pointer get using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub const unsafe fn raw_get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        this: *const Self,
+    ) -> u64 {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return 0;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 };
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = 0usize;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte.reverse_bits() as usize) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte as usize) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val as u64
+        } else {
+            let mut val = 0u64;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte.reverse_bits() as u64) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte as u64) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val
+        }
+    }
+    /// Raw pointer set using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub unsafe fn raw_set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        this: *mut Self,
+        val: u64,
+    ) {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        let storage_ptr = this.cast::<[u8; N]>().cast::<u8>();
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = val as usize;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift;
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                let byte_ptr = unsafe { storage_ptr.add(start_byte + i) };
+                if cfg!(target_endian = "big") {
+                    let byte = unsafe { (*byte_ptr).reverse_bits() };
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    unsafe { *byte_ptr = new_byte.reverse_bits() };
+                } else {
+                    unsafe {
+                        *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask)
+                    };
+                }
+                i += 1;
+            }
+        } else {
+            let mut val = val;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 {
+                !0u64 << bit_shift
+            } else {
+                ((1u64 << BIT_WIDTH) - 1) << bit_shift
+            };
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                let byte_ptr = unsafe { storage_ptr.add(start_byte + i) };
+                if cfg!(target_endian = "big") {
+                    let byte = unsafe { (*byte_ptr).reverse_bits() };
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    unsafe { *byte_ptr = new_byte.reverse_bits() };
+                } else {
+                    unsafe {
+                        *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask)
+                    };
+                }
+                i += 1;
+            }
+        }
+    }
+}
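
Note that the usize-versus-u64 split in all four methods depends only on constants, so each monomorphization keeps a single code path. A hedged model of the selection (hypothetical helper name; on a 64-bit target `usize::BITS` is 64):

    const fn uses_native_path(bit_offset: usize, bit_width: u8) -> bool {
        // mirrors the generated check: BIT_WIDTH as usize + bit_shift <= usize::BITS
        bit_width as usize + bit_offset % 8 <= usize::BITS as usize
    }
    // e.g. a 56-bit field at offset 0 takes the native path (56 + 0 <= 64),
    // while a 61-bit field starting at bit 5 does not (61 + 5 > 64).
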
 #[repr(C)]
 #[derive(Debug, Copy, Clone)]
 pub struct foo {
@@ -236,13 +507,15 @@ impl Default for foo {
 impl foo {
     #[inline]
     pub fn b(&self) -> bool {
-        unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 8u8) as u8) }
+        unsafe {
+            ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 8u8>() as u8)
+        }
     }
     #[inline]
     pub fn set_b(&mut self, val: bool) {
         unsafe {
             let val: u8 = ::std::mem::transmute(val);
-            self._bitfield_1.set(0usize, 8u8, val as u64)
+            self._bitfield_1.set_const::<0usize, 8u8>(val as u64)
         }
     }
     #[inline]
@@ -251,7 +524,10 @@ impl foo {
             ::std::mem::transmute(
                 <__BindgenBitfieldUnit<
                     [u8; 1usize],
-                >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 8u8) as u8,
+                >>::raw_get_const::<
+                    0usize,
+                    8u8,
+                >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8,
             )
         }
     }
@@ -261,26 +537,23 @@ impl foo {
             let val: u8 = ::std::mem::transmute(val);
             <__BindgenBitfieldUnit<
                 [u8; 1usize],
-            >>::raw_set(
-                ::std::ptr::addr_of_mut!((*this)._bitfield_1),
+            >>::raw_set_const::<
                 0usize,
                 8u8,
-                val as u64,
-            )
+            >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64)
         }
     }
     #[inline]
     pub fn new_bitfield_1(b: bool) -> __BindgenBitfieldUnit<[u8; 1usize]> {
         let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default();
         __bindgen_bitfield_unit
-            .set(
+            .set_const::<
                 0usize,
                 8u8,
-                {
-                    let b: u8 = unsafe { ::std::mem::transmute(b) };
-                    b as u64
-                },
-            );
+            >({
+                let b: u8 = unsafe { ::std::mem::transmute(b) };
+                b as u64
+            });
         __bindgen_bitfield_unit
     }
 }
diff --git a/bindgen-tests/tests/expectations/tests/bitfield_align.rs b/bindgen-tests/tests/expectations/tests/bitfield_align.rs
index 58302c5d0f..c7f50fce6d 100644
--- a/bindgen-tests/tests/expectations/tests/bitfield_align.rs
+++ b/bindgen-tests/tests/expectations/tests/bitfield_align.rs
@@ -217,6 +217,277 @@ where
         }
     }
 }
+/// Const-generic methods for efficient bitfield access when offset and width
+/// are known at compile time.
+impl<const N: usize> __BindgenBitfieldUnit<[u8; N]> {
+    /// Get a field using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub const fn get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        &self,
+    ) -> u64 {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return 0;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = 0usize;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val
+                        |= (self.storage[start_byte + i].reverse_bits() as usize)
+                            << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i] as usize) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val as u64
+        } else {
+            let mut val = 0u64;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val
+                        |= (self.storage[start_byte + i].reverse_bits() as u64)
+                            << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i] as u64) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val
+        }
+    }
+    /// Set a field using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub fn set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&mut self, val: u64) {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = val as usize;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift;
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                if cfg!(target_endian = "big") {
+                    let byte = self.storage[start_byte + i].reverse_bits();
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    self.storage[start_byte + i] = new_byte.reverse_bits();
+                } else {
+                    self.storage[start_byte + i] = (self.storage[start_byte + i]
+                        & !byte_mask) | (byte_val & byte_mask);
+                }
+                i += 1;
+            }
+        } else {
+            let mut val = val;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 {
+                !0u64 << bit_shift
+            } else {
+                ((1u64 << BIT_WIDTH) - 1) << bit_shift
+            };
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                if cfg!(target_endian = "big") {
+                    let byte = self.storage[start_byte + i].reverse_bits();
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    self.storage[start_byte + i] = new_byte.reverse_bits();
+                } else {
+                    self.storage[start_byte + i] = (self.storage[start_byte + i]
+                        & !byte_mask) | (byte_val & byte_mask);
+                }
+                i += 1;
+            }
+        }
+    }
+    /// Raw pointer get using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub const unsafe fn raw_get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        this: *const Self,
+    ) -> u64 {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return 0;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 };
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = 0usize;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte.reverse_bits() as usize) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte as usize) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val as u64
+        } else {
+            let mut val = 0u64;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte.reverse_bits() as u64) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte as u64) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val
+        }
+    }
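
Before `raw_set_const` below, a worked example of the read-modify-write masking shared by both setters, for a 3-bit field at bit offset 1 on a little-endian target (all values illustrative):

    let mut byte = 0b1001_0110u8;           // existing storage byte
    let val = 0b101u8 << 1;                 // field value, already masked, shifted into place
    let field_mask = ((1u8 << 3) - 1) << 1; // 0b0000_1110
    byte = (byte & !field_mask) | (val & field_mask);
    assert_eq!(byte, 0b1001_1010);          // bits 1..=3 now hold 0b101, the rest untouched
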
+    /// Raw pointer set using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub unsafe fn raw_set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        this: *mut Self,
+        val: u64,
+    ) {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        let storage_ptr = this.cast::<[u8; N]>().cast::<u8>();
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = val as usize;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift;
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                let byte_ptr = unsafe { storage_ptr.add(start_byte + i) };
+                if cfg!(target_endian = "big") {
+                    let byte = unsafe { (*byte_ptr).reverse_bits() };
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    unsafe { *byte_ptr = new_byte.reverse_bits() };
+                } else {
+                    unsafe {
+                        *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask)
+                    };
+                }
+                i += 1;
+            }
+        } else {
+            let mut val = val;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 {
+                !0u64 << bit_shift
+            } else {
+                ((1u64 << BIT_WIDTH) - 1) << bit_shift
+            };
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                let byte_ptr = unsafe { storage_ptr.add(start_byte + i) };
+                if cfg!(target_endian = "big") {
+                    let byte = unsafe { (*byte_ptr).reverse_bits() };
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    unsafe { *byte_ptr = new_byte.reverse_bits() };
+                } else {
+                    unsafe {
+                        *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask)
+                    };
+                }
+                i += 1;
+            }
+        }
+    }
+}
 #[repr(C)]
 #[derive(Debug, Default, Copy, Clone)]
 pub struct A {
@@ -235,13 +506,15 @@ const _: () = {
 impl A {
     #[inline]
     pub fn b1(&self) -> ::std::os::raw::c_uint {
-        unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u32) }
+        unsafe {
+            ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u32)
+        }
     }
     #[inline]
     pub fn set_b1(&mut self, val: ::std::os::raw::c_uint) {
         unsafe {
             let val: u32 = ::std::mem::transmute(val);
-            self._bitfield_1.set(0usize, 1u8, val as u64)
+            self._bitfield_1.set_const::<0usize, 1u8>(val as u64)
         }
     }
     #[inline]
@@ -250,8 +523,10 @@ impl A {
             ::std::mem::transmute(
                 <__BindgenBitfieldUnit<
                     [u8; 2usize],
-                >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8)
-                    as u32,
+                >>::raw_get_const::<
+                    0usize,
+                    1u8,
+                >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32,
             )
         }
     }
@@ -261,23 +536,23 @@ impl A {
             let val: u32 = ::std::mem::transmute(val);
             <__BindgenBitfieldUnit<
                 [u8; 2usize],
-            >>::raw_set(
-                ::std::ptr::addr_of_mut!((*this)._bitfield_1),
+            >>::raw_set_const::<
                 0usize,
                 1u8,
-                val as u64,
-            )
+            >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64)
         }
     }
     #[inline]
     pub fn b2(&self) -> ::std::os::raw::c_uint {
-        unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u32) }
+        unsafe {
+            ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 1u8>() as u32)
+        }
    }
    #[inline]
    pub fn set_b2(&mut self, val: 
::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(1usize, 1u8, val as u64) + self._bitfield_1.set_const::<1usize, 1u8>(val as u64) } } #[inline] @@ -286,8 +561,10 @@ impl A { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 1u8) - as u32, + >>::raw_get_const::< + 1usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -297,23 +574,23 @@ impl A { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b3(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<2usize, 1u8>() as u32) + } } #[inline] pub fn set_b3(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(2usize, 1u8, val as u64) + self._bitfield_1.set_const::<2usize, 1u8>(val as u64) } } #[inline] @@ -322,8 +599,10 @@ impl A { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 2usize, 1u8) - as u32, + >>::raw_get_const::< + 2usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -333,23 +612,23 @@ impl A { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 2usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b4(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(3usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<3usize, 1u8>() as u32) + } } #[inline] pub fn set_b4(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(3usize, 1u8, val as u64) + self._bitfield_1.set_const::<3usize, 1u8>(val as u64) } } #[inline] @@ -358,8 +637,10 @@ impl A { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 3usize, 1u8) - as u32, + >>::raw_get_const::< + 3usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -369,23 +650,23 @@ impl A { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 3usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b5(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(4usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<4usize, 1u8>() as u32) + } } #[inline] pub fn set_b5(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(4usize, 1u8, val as u64) + self._bitfield_1.set_const::<4usize, 1u8>(val as u64) } } #[inline] @@ -394,8 +675,10 @@ impl A { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 4usize, 1u8) - as u32, + >>::raw_get_const::< + 4usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) 
} } @@ -405,23 +688,23 @@ impl A { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 4usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b6(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(5usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<5usize, 1u8>() as u32) + } } #[inline] pub fn set_b6(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(5usize, 1u8, val as u64) + self._bitfield_1.set_const::<5usize, 1u8>(val as u64) } } #[inline] @@ -430,8 +713,10 @@ impl A { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 5usize, 1u8) - as u32, + >>::raw_get_const::< + 5usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -441,23 +726,23 @@ impl A { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 5usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b7(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(6usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<6usize, 1u8>() as u32) + } } #[inline] pub fn set_b7(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(6usize, 1u8, val as u64) + self._bitfield_1.set_const::<6usize, 1u8>(val as u64) } } #[inline] @@ -466,8 +751,10 @@ impl A { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 6usize, 1u8) - as u32, + >>::raw_get_const::< + 6usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -477,23 +764,23 @@ impl A { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 6usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b8(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(7usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<7usize, 1u8>() as u32) + } } #[inline] pub fn set_b8(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(7usize, 1u8, val as u64) + self._bitfield_1.set_const::<7usize, 1u8>(val as u64) } } #[inline] @@ -502,8 +789,10 @@ impl A { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 7usize, 1u8) - as u32, + >>::raw_get_const::< + 7usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -513,23 +802,23 @@ impl A { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 7usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b9(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(8usize, 1u8) as u32) } + unsafe { + 
::std::mem::transmute(self._bitfield_1.get_const::<8usize, 1u8>() as u32) + } } #[inline] pub fn set_b9(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(8usize, 1u8, val as u64) + self._bitfield_1.set_const::<8usize, 1u8>(val as u64) } } #[inline] @@ -538,8 +827,10 @@ impl A { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 8usize, 1u8) - as u32, + >>::raw_get_const::< + 8usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -549,23 +840,23 @@ impl A { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 8usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b10(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(9usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<9usize, 1u8>() as u32) + } } #[inline] pub fn set_b10(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(9usize, 1u8, val as u64) + self._bitfield_1.set_const::<9usize, 1u8>(val as u64) } } #[inline] @@ -574,8 +865,10 @@ impl A { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 9usize, 1u8) - as u32, + >>::raw_get_const::< + 9usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -585,12 +878,10 @@ impl A { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 9usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -608,95 +899,85 @@ impl A { ) -> __BindgenBitfieldUnit<[u8; 2usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 2usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let b1: u32 = unsafe { ::std::mem::transmute(b1) }; - b1 as u64 - }, - ); + >({ + let b1: u32 = unsafe { ::std::mem::transmute(b1) }; + b1 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 1u8, - { - let b2: u32 = unsafe { ::std::mem::transmute(b2) }; - b2 as u64 - }, - ); + >({ + let b2: u32 = unsafe { ::std::mem::transmute(b2) }; + b2 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 2usize, 1u8, - { - let b3: u32 = unsafe { ::std::mem::transmute(b3) }; - b3 as u64 - }, - ); + >({ + let b3: u32 = unsafe { ::std::mem::transmute(b3) }; + b3 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 3usize, 1u8, - { - let b4: u32 = unsafe { ::std::mem::transmute(b4) }; - b4 as u64 - }, - ); + >({ + let b4: u32 = unsafe { ::std::mem::transmute(b4) }; + b4 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 4usize, 1u8, - { - let b5: u32 = unsafe { ::std::mem::transmute(b5) }; - b5 as u64 - }, - ); + >({ + let b5: u32 = unsafe { ::std::mem::transmute(b5) }; + b5 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 5usize, 1u8, - { - let b6: u32 = unsafe { ::std::mem::transmute(b6) }; - b6 as u64 - }, - ); + >({ + let b6: u32 = unsafe { ::std::mem::transmute(b6) }; + b6 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 6usize, 1u8, - { - let b7: u32 = unsafe { ::std::mem::transmute(b7) }; - b7 as u64 - }, - ); + >({ + 
let b7: u32 = unsafe { ::std::mem::transmute(b7) }; + b7 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 7usize, 1u8, - { - let b8: u32 = unsafe { ::std::mem::transmute(b8) }; - b8 as u64 - }, - ); + >({ + let b8: u32 = unsafe { ::std::mem::transmute(b8) }; + b8 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 8usize, 1u8, - { - let b9: u32 = unsafe { ::std::mem::transmute(b9) }; - b9 as u64 - }, - ); + >({ + let b9: u32 = unsafe { ::std::mem::transmute(b9) }; + b9 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 9usize, 1u8, - { - let b10: u32 = unsafe { ::std::mem::transmute(b10) }; - b10 as u64 - }, - ); + >({ + let b10: u32 = unsafe { ::std::mem::transmute(b10) }; + b10 as u64 + }); __bindgen_bitfield_unit } } @@ -714,13 +995,15 @@ const _: () = { impl B { #[inline] pub fn foo(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 31u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 31u8>() as u32) + } } #[inline] pub fn set_foo(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 31u8, val as u64) + self._bitfield_1.set_const::<0usize, 31u8>(val as u64) } } #[inline] @@ -729,8 +1012,10 @@ impl B { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 31u8) - as u32, + >>::raw_get_const::< + 0usize, + 31u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -740,23 +1025,23 @@ impl B { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 31u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bar(&self) -> ::std::os::raw::c_uchar { - unsafe { ::std::mem::transmute(self._bitfield_1.get(31usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<31usize, 1u8>() as u8) + } } #[inline] pub fn set_bar(&mut self, val: ::std::os::raw::c_uchar) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(31usize, 1u8, val as u64) + self._bitfield_1.set_const::<31usize, 1u8>(val as u64) } } #[inline] @@ -765,8 +1050,10 @@ impl B { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 31usize, 1u8) - as u8, + >>::raw_get_const::< + 31usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -776,12 +1063,10 @@ impl B { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 31usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -791,23 +1076,21 @@ impl B { ) -> __BindgenBitfieldUnit<[u8; 4usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 31u8, - { - let foo: u32 = unsafe { ::std::mem::transmute(foo) }; - foo as u64 - }, - ); + >({ + let foo: u32 = unsafe { ::std::mem::transmute(foo) }; + foo as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 31usize, 1u8, - { - let bar: u8 = unsafe { ::std::mem::transmute(bar) }; - bar as u64 - }, - ); + >({ + let bar: u8 = unsafe { ::std::mem::transmute(bar) }; + bar as u64 + }); __bindgen_bitfield_unit } } @@ -828,13 
+1111,15 @@ const _: () = { impl C { #[inline] pub fn b1(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u32) + } } #[inline] pub fn set_b1(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -843,8 +1128,10 @@ impl C { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) - as u32, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -854,23 +1141,23 @@ impl C { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b2(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 1u8>() as u32) + } } #[inline] pub fn set_b2(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(1usize, 1u8, val as u64) + self._bitfield_1.set_const::<1usize, 1u8>(val as u64) } } #[inline] @@ -879,8 +1166,10 @@ impl C { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 1u8) - as u32, + >>::raw_get_const::< + 1usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -890,12 +1179,10 @@ impl C { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -905,23 +1192,21 @@ impl C { ) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let b1: u32 = unsafe { ::std::mem::transmute(b1) }; - b1 as u64 - }, - ); + >({ + let b1: u32 = unsafe { ::std::mem::transmute(b1) }; + b1 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 1u8, - { - let b2: u32 = unsafe { ::std::mem::transmute(b2) }; - b2 as u64 - }, - ); + >({ + let b2: u32 = unsafe { ::std::mem::transmute(b2) }; + b2 as u64 + }); __bindgen_bitfield_unit } } @@ -940,13 +1225,15 @@ const _: () = { impl Date1 { #[inline] pub fn nWeekDay(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 3u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 3u8>() as u16) + } } #[inline] pub fn set_nWeekDay(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 3u8, val as u64) + self._bitfield_1.set_const::<0usize, 3u8>(val as u64) } } #[inline] @@ -955,8 +1242,10 @@ impl Date1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 3u8) - as u16, + >>::raw_get_const::< + 0usize, + 3u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ 
-966,23 +1255,23 @@ impl Date1 { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 3u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn nMonthDay(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(3usize, 6u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<3usize, 6u8>() as u16) + } } #[inline] pub fn set_nMonthDay(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(3usize, 6u8, val as u64) + self._bitfield_1.set_const::<3usize, 6u8>(val as u64) } } #[inline] @@ -991,8 +1280,10 @@ impl Date1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 3usize, 6u8) - as u16, + >>::raw_get_const::< + 3usize, + 6u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -1002,23 +1293,23 @@ impl Date1 { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 3usize, 6u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn nMonth(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(9usize, 5u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<9usize, 5u8>() as u16) + } } #[inline] pub fn set_nMonth(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(9usize, 5u8, val as u64) + self._bitfield_1.set_const::<9usize, 5u8>(val as u64) } } #[inline] @@ -1027,8 +1318,10 @@ impl Date1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 9usize, 5u8) - as u16, + >>::raw_get_const::< + 9usize, + 5u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -1038,23 +1331,23 @@ impl Date1 { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 9usize, 5u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn nYear(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(16usize, 8u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<16usize, 8u8>() as u16) + } } #[inline] pub fn set_nYear(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(16usize, 8u8, val as u64) + self._bitfield_1.set_const::<16usize, 8u8>(val as u64) } } #[inline] @@ -1063,8 +1356,10 @@ impl Date1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 16usize, 8u8) - as u16, + >>::raw_get_const::< + 16usize, + 8u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -1074,12 +1369,10 @@ impl Date1 { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 16usize, 8u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -1091,41 +1384,37 @@ impl Date1 { ) -> __BindgenBitfieldUnit<[u8; 
3usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 3usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 3u8, - { - let nWeekDay: u16 = unsafe { ::std::mem::transmute(nWeekDay) }; - nWeekDay as u64 - }, - ); + >({ + let nWeekDay: u16 = unsafe { ::std::mem::transmute(nWeekDay) }; + nWeekDay as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 3usize, 6u8, - { - let nMonthDay: u16 = unsafe { ::std::mem::transmute(nMonthDay) }; - nMonthDay as u64 - }, - ); + >({ + let nMonthDay: u16 = unsafe { ::std::mem::transmute(nMonthDay) }; + nMonthDay as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 9usize, 5u8, - { - let nMonth: u16 = unsafe { ::std::mem::transmute(nMonth) }; - nMonth as u64 - }, - ); + >({ + let nMonth: u16 = unsafe { ::std::mem::transmute(nMonth) }; + nMonth as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 16usize, 8u8, - { - let nYear: u16 = unsafe { ::std::mem::transmute(nYear) }; - nYear as u64 - }, - ); + >({ + let nYear: u16 = unsafe { ::std::mem::transmute(nYear) }; + nYear as u64 + }); __bindgen_bitfield_unit } } @@ -1143,13 +1432,15 @@ const _: () = { impl Date2 { #[inline] pub fn nWeekDay(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 3u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 3u8>() as u16) + } } #[inline] pub fn set_nWeekDay(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 3u8, val as u64) + self._bitfield_1.set_const::<0usize, 3u8>(val as u64) } } #[inline] @@ -1158,8 +1449,10 @@ impl Date2 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 3u8) - as u16, + >>::raw_get_const::< + 0usize, + 3u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -1169,23 +1462,23 @@ impl Date2 { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 3u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn nMonthDay(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(3usize, 6u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<3usize, 6u8>() as u16) + } } #[inline] pub fn set_nMonthDay(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(3usize, 6u8, val as u64) + self._bitfield_1.set_const::<3usize, 6u8>(val as u64) } } #[inline] @@ -1194,8 +1487,10 @@ impl Date2 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 3usize, 6u8) - as u16, + >>::raw_get_const::< + 3usize, + 6u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -1205,23 +1500,23 @@ impl Date2 { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 3usize, 6u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn nMonth(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(9usize, 5u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<9usize, 5u8>() as u16) + } } #[inline] pub fn 
set_nMonth(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(9usize, 5u8, val as u64) + self._bitfield_1.set_const::<9usize, 5u8>(val as u64) } } #[inline] @@ -1230,8 +1525,10 @@ impl Date2 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 9usize, 5u8) - as u16, + >>::raw_get_const::< + 9usize, + 5u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -1241,23 +1538,23 @@ impl Date2 { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 9usize, 5u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn nYear(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(16usize, 8u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<16usize, 8u8>() as u16) + } } #[inline] pub fn set_nYear(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(16usize, 8u8, val as u64) + self._bitfield_1.set_const::<16usize, 8u8>(val as u64) } } #[inline] @@ -1266,8 +1563,10 @@ impl Date2 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 16usize, 8u8) - as u16, + >>::raw_get_const::< + 16usize, + 8u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -1277,23 +1576,23 @@ impl Date2 { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 16usize, 8u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn byte(&self) -> ::std::os::raw::c_uchar { - unsafe { ::std::mem::transmute(self._bitfield_1.get(24usize, 8u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<24usize, 8u8>() as u8) + } } #[inline] pub fn set_byte(&mut self, val: ::std::os::raw::c_uchar) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(24usize, 8u8, val as u64) + self._bitfield_1.set_const::<24usize, 8u8>(val as u64) } } #[inline] @@ -1302,8 +1601,10 @@ impl Date2 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 24usize, 8u8) - as u8, + >>::raw_get_const::< + 24usize, + 8u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -1313,12 +1614,10 @@ impl Date2 { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 24usize, 8u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -1331,50 +1630,45 @@ impl Date2 { ) -> __BindgenBitfieldUnit<[u8; 4usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 3u8, - { - let nWeekDay: u16 = unsafe { ::std::mem::transmute(nWeekDay) }; - nWeekDay as u64 - }, - ); + >({ + let nWeekDay: u16 = unsafe { ::std::mem::transmute(nWeekDay) }; + nWeekDay as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 3usize, 6u8, - { - let nMonthDay: u16 = unsafe { ::std::mem::transmute(nMonthDay) }; - nMonthDay as u64 - }, - ); + >({ + let nMonthDay: u16 = unsafe { 
::std::mem::transmute(nMonthDay) }; + nMonthDay as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 9usize, 5u8, - { - let nMonth: u16 = unsafe { ::std::mem::transmute(nMonth) }; - nMonth as u64 - }, - ); + >({ + let nMonth: u16 = unsafe { ::std::mem::transmute(nMonth) }; + nMonth as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 16usize, 8u8, - { - let nYear: u16 = unsafe { ::std::mem::transmute(nYear) }; - nYear as u64 - }, - ); + >({ + let nYear: u16 = unsafe { ::std::mem::transmute(nYear) }; + nYear as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 24usize, 8u8, - { - let byte: u8 = unsafe { ::std::mem::transmute(byte) }; - byte as u64 - }, - ); + >({ + let byte: u8 = unsafe { ::std::mem::transmute(byte) }; + byte as u64 + }); __bindgen_bitfield_unit } } @@ -1394,13 +1688,15 @@ const _: () = { impl Date3 { #[inline] pub fn nWeekDay(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 3u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 3u8>() as u16) + } } #[inline] pub fn set_nWeekDay(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 3u8, val as u64) + self._bitfield_1.set_const::<0usize, 3u8>(val as u64) } } #[inline] @@ -1409,8 +1705,10 @@ impl Date3 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 3u8) - as u16, + >>::raw_get_const::< + 0usize, + 3u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -1420,23 +1718,23 @@ impl Date3 { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 3u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn nMonthDay(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(3usize, 6u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<3usize, 6u8>() as u16) + } } #[inline] pub fn set_nMonthDay(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(3usize, 6u8, val as u64) + self._bitfield_1.set_const::<3usize, 6u8>(val as u64) } } #[inline] @@ -1445,8 +1743,10 @@ impl Date3 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 3usize, 6u8) - as u16, + >>::raw_get_const::< + 3usize, + 6u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -1456,23 +1756,23 @@ impl Date3 { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 3usize, 6u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn nMonth(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(9usize, 5u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<9usize, 5u8>() as u16) + } } #[inline] pub fn set_nMonth(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(9usize, 5u8, val as u64) + self._bitfield_1.set_const::<9usize, 5u8>(val as u64) } } #[inline] @@ -1481,8 +1781,10 @@ impl Date3 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - 
>>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 9usize, 5u8) - as u16, + >>::raw_get_const::< + 9usize, + 5u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -1492,23 +1794,23 @@ impl Date3 { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 9usize, 5u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn nYear(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(16usize, 8u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<16usize, 8u8>() as u16) + } } #[inline] pub fn set_nYear(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(16usize, 8u8, val as u64) + self._bitfield_1.set_const::<16usize, 8u8>(val as u64) } } #[inline] @@ -1517,8 +1819,10 @@ impl Date3 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 16usize, 8u8) - as u16, + >>::raw_get_const::< + 16usize, + 8u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -1528,12 +1832,10 @@ impl Date3 { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 16usize, 8u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -1545,41 +1847,37 @@ impl Date3 { ) -> __BindgenBitfieldUnit<[u8; 3usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 3usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 3u8, - { - let nWeekDay: u16 = unsafe { ::std::mem::transmute(nWeekDay) }; - nWeekDay as u64 - }, - ); + >({ + let nWeekDay: u16 = unsafe { ::std::mem::transmute(nWeekDay) }; + nWeekDay as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 3usize, 6u8, - { - let nMonthDay: u16 = unsafe { ::std::mem::transmute(nMonthDay) }; - nMonthDay as u64 - }, - ); + >({ + let nMonthDay: u16 = unsafe { ::std::mem::transmute(nMonthDay) }; + nMonthDay as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 9usize, 5u8, - { - let nMonth: u16 = unsafe { ::std::mem::transmute(nMonth) }; - nMonth as u64 - }, - ); + >({ + let nMonth: u16 = unsafe { ::std::mem::transmute(nMonth) }; + nMonth as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 16usize, 8u8, - { - let nYear: u16 = unsafe { ::std::mem::transmute(nYear) }; - nYear as u64 - }, - ); + >({ + let nYear: u16 = unsafe { ::std::mem::transmute(nYear) }; + nYear as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/bitfield_align_2.rs b/bindgen-tests/tests/expectations/tests/bitfield_align_2.rs index e568c8ded3..9d5591e5a5 100644 --- a/bindgen-tests/tests/expectations/tests/bitfield_align_2.rs +++ b/bindgen-tests/tests/expectations/tests/bitfield_align_2.rs @@ -218,6 +218,277 @@ where } } } +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl<const N: usize> __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize.
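+    ///
+    /// Illustrative sketch (the unit size, offset, and width here are
+    /// hypothetical, not taken from any real binding): a 6-bit field at
+    /// bit offset 2 round-trips through `set_const`/`get_const`.
+    /// ```ignore
+    /// let mut unit = __BindgenBitfieldUnit::new([0u8; 1]);
+    /// unit.set_const::<2usize, 6u8>(0b10_1101);
+    /// assert_eq!(unit.get_const::<2usize, 6u8>(), 0b10_1101);
+    /// ```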
+ #[inline] + pub const fn get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + }
+ /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + }
+ /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize.
+ #[inline] + pub unsafe fn raw_set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::<u8>(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } + } +}
#[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum MyEnum { @@ -249,13 +520,15 @@ impl Default for TaggedPtr { impl TaggedPtr { #[inline] pub fn tag(&self) -> MyEnum { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 2u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 2u8>() as u32) + } } #[inline] pub fn set_tag(&mut self, val: MyEnum) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 2u8, val as u64) + self._bitfield_1.set_const::<0usize, 2u8>(val as u64) } } #[inline] @@ -264,8 +537,10 @@ impl TaggedPtr { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 8usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 2u8) - as u32, + >>::raw_get_const::< + 0usize, + 2u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -275,23 +550,23 @@ impl TaggedPtr { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 8usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 2u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn ptr(&self) -> ::std::os::raw::c_long { - unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 62u8) as u64) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<2usize, 62u8>() as u64) + } } #[inline] pub fn set_ptr(&mut 
self, val: ::std::os::raw::c_long) { unsafe { let val: u64 = ::std::mem::transmute(val); - self._bitfield_1.set(2usize, 62u8, val as u64) + self._bitfield_1.set_const::<2usize, 62u8>(val as u64) } } #[inline] @@ -300,8 +575,10 @@ impl TaggedPtr { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 8usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 2usize, 62u8) - as u64, + >>::raw_get_const::< + 2usize, + 62u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u64, ) } } @@ -311,12 +588,10 @@ impl TaggedPtr { let val: u64 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 8usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 2usize, 62u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -326,23 +601,21 @@ impl TaggedPtr { ) -> __BindgenBitfieldUnit<[u8; 8usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 2u8, - { - let tag: u32 = unsafe { ::std::mem::transmute(tag) }; - tag as u64 - }, - ); + >({ + let tag: u32 = unsafe { ::std::mem::transmute(tag) }; + tag as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 2usize, 62u8, - { - let ptr: u64 = unsafe { ::std::mem::transmute(ptr) }; - ptr as u64 - }, - ); + >({ + let ptr: u64 = unsafe { ::std::mem::transmute(ptr) }; + ptr as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/bitfield_method_mangling.rs b/bindgen-tests/tests/expectations/tests/bitfield_method_mangling.rs index 2bb54f326a..a4b2dc009b 100644 --- a/bindgen-tests/tests/expectations/tests/bitfield_method_mangling.rs +++ b/bindgen-tests/tests/expectations/tests/bitfield_method_mangling.rs @@ -217,6 +217,277 @@ where } } } +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl<const N: usize> __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize.
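+    ///
+    /// A rough sketch of why constant offsets help (hypothetical layout,
+    /// not from any real header): for a byte-aligned 8-bit field the
+    /// shifts and masks below fold away, so the read can lower to a
+    /// single byte load.
+    /// ```ignore
+    /// let unit = __BindgenBitfieldUnit::new([0x00u8, 0xAB, 0x00, 0x00]);
+    /// assert_eq!(unit.get_const::<8usize, 8u8>(), 0xAB);
+    /// ```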
+ #[inline] + pub const fn get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + }
+ /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + }
+ /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize.
+ #[inline] + pub unsafe fn raw_set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::<u8>(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } + } +}
#[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct mach_msg_type_descriptor_t { @@ -235,13 +506,15 @@ const _: () = { impl mach_msg_type_descriptor_t { #[inline] pub fn pad3(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 24u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 24u8>() as u32) + } } #[inline] pub fn set_pad3(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 24u8, val as u64) + self._bitfield_1.set_const::<0usize, 24u8>(val as u64) } } #[inline] @@ -250,8 +523,10 @@ impl mach_msg_type_descriptor_t { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 24u8) - as u32, + >>::raw_get_const::< + 0usize, + 24u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -261,23 +536,23 @@ impl mach_msg_type_descriptor_t { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 24u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn type_(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(24usize, 8u8) as u32) } + unsafe { 
::std::mem::transmute(self._bitfield_1.get_const::<24usize, 8u8>() as u32) + } } #[inline] pub fn set_type(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(24usize, 8u8, val as u64) + self._bitfield_1.set_const::<24usize, 8u8>(val as u64) } } #[inline] @@ -286,8 +561,10 @@ impl mach_msg_type_descriptor_t { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 24usize, 8u8) - as u32, + >>::raw_get_const::< + 24usize, + 8u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -297,12 +574,10 @@ impl mach_msg_type_descriptor_t { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 24usize, 8u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -312,23 +587,21 @@ impl mach_msg_type_descriptor_t { ) -> __BindgenBitfieldUnit<[u8; 4usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 24u8, - { - let pad3: u32 = unsafe { ::std::mem::transmute(pad3) }; - pad3 as u64 - }, - ); + >({ + let pad3: u32 = unsafe { ::std::mem::transmute(pad3) }; + pad3 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 24usize, 8u8, - { - let type_: u32 = unsafe { ::std::mem::transmute(type_) }; - type_ as u64 - }, - ); + >({ + let type_: u32 = unsafe { ::std::mem::transmute(type_) }; + type_ as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/bitfield_pack_offset.rs b/bindgen-tests/tests/expectations/tests/bitfield_pack_offset.rs index dad536203f..2b0081ae56 100644 --- a/bindgen-tests/tests/expectations/tests/bitfield_pack_offset.rs +++ b/bindgen-tests/tests/expectations/tests/bitfield_pack_offset.rs @@ -217,6 +217,277 @@ where } } } +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl<const N: usize> __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize.
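+    ///
+    /// Sketch of a field that straddles byte boundaries (hypothetical
+    /// layout): a 10-bit field at bit offset 14 touches two of the three
+    /// bytes; both are gathered into one word, then shifted and masked.
+    /// ```ignore
+    /// let mut unit = __BindgenBitfieldUnit::new([0u8; 3]);
+    /// unit.set_const::<14usize, 10u8>(0x2AB);
+    /// assert_eq!(unit.get_const::<14usize, 10u8>(), 0x2AB);
+    /// ```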
+ #[inline] + pub const fn get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + }
+ /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + }
+ /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize.
+ #[inline] + pub unsafe fn raw_set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::<u8>(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } + } +}
#[repr(C)] #[derive(Debug, Copy, Clone)] pub struct A { @@ -248,13 +519,15 @@ impl Default for A { impl A { #[inline] pub fn firmness(&self) -> ::std::os::raw::c_uchar { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 4u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 4u8>() as u8) + } } #[inline] pub fn set_firmness(&mut self, val: ::std::os::raw::c_uchar) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 4u8, val as u64) + self._bitfield_1.set_const::<0usize, 4u8>(val as u64) } } #[inline] @@ -263,7 +536,10 @@ impl A { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 4u8) as u8, + >>::raw_get_const::< + 0usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -273,23 +549,23 @@ impl A { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn color(&self) -> ::std::os::raw::c_uchar { - unsafe { ::std::mem::transmute(self._bitfield_1.get(4usize, 4u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<4usize, 4u8>() as u8) + } } #[inline] pub fn set_color(&mut self, val: 
::std::os::raw::c_uchar) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(4usize, 4u8, val as u64) + self._bitfield_1.set_const::<4usize, 4u8>(val as u64) } } #[inline] @@ -298,7 +574,10 @@ impl A { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 4usize, 4u8) as u8, + >>::raw_get_const::< + 4usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -308,23 +587,23 @@ impl A { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 4usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn weedsBonus(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(8usize, 3u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<8usize, 3u8>() as u16) + } } #[inline] pub fn set_weedsBonus(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(8usize, 3u8, val as u64) + self._bitfield_1.set_const::<8usize, 3u8>(val as u64) } } #[inline] @@ -333,8 +612,10 @@ impl A { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 8usize, 3u8) - as u16, + >>::raw_get_const::< + 8usize, + 3u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -344,23 +625,23 @@ impl A { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 8usize, 3u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn pestsBonus(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(11usize, 3u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<11usize, 3u8>() as u16) + } } #[inline] pub fn set_pestsBonus(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(11usize, 3u8, val as u64) + self._bitfield_1.set_const::<11usize, 3u8>(val as u64) } } #[inline] @@ -369,8 +650,10 @@ impl A { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 11usize, 3u8) - as u16, + >>::raw_get_const::< + 11usize, + 3u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -380,23 +663,23 @@ impl A { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 11usize, 3u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn size(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(14usize, 10u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<14usize, 10u8>() as u16) + } } #[inline] pub fn set_size(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(14usize, 10u8, val as u64) + self._bitfield_1.set_const::<14usize, 10u8>(val as u64) } } #[inline] @@ -405,8 +688,10 @@ impl A { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 14usize, 10u8) - as u16, + >>::raw_get_const::< + 14usize, + 
10u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -416,12 +701,10 @@ impl A { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 14usize, 10u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -434,61 +717,58 @@ impl A { ) -> __BindgenBitfieldUnit<[u8; 3usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 3usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 4u8, - { - let firmness: u8 = unsafe { ::std::mem::transmute(firmness) }; - firmness as u64 - }, - ); + >({ + let firmness: u8 = unsafe { ::std::mem::transmute(firmness) }; + firmness as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 4usize, 4u8, - { - let color: u8 = unsafe { ::std::mem::transmute(color) }; - color as u64 - }, - ); + >({ + let color: u8 = unsafe { ::std::mem::transmute(color) }; + color as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 8usize, 3u8, - { - let weedsBonus: u16 = unsafe { ::std::mem::transmute(weedsBonus) }; - weedsBonus as u64 - }, - ); + >({ + let weedsBonus: u16 = unsafe { ::std::mem::transmute(weedsBonus) }; + weedsBonus as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 11usize, 3u8, - { - let pestsBonus: u16 = unsafe { ::std::mem::transmute(pestsBonus) }; - pestsBonus as u64 - }, - ); + >({ + let pestsBonus: u16 = unsafe { ::std::mem::transmute(pestsBonus) }; + pestsBonus as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 14usize, 10u8, - { - let size: u16 = unsafe { ::std::mem::transmute(size) }; - size as u64 - }, - ); + >({ + let size: u16 = unsafe { ::std::mem::transmute(size) }; + size as u64 + }); __bindgen_bitfield_unit } #[inline] pub fn minYield(&self) -> ::std::os::raw::c_uchar { - unsafe { ::std::mem::transmute(self._bitfield_2.get(0usize, 4u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_2.get_const::<0usize, 4u8>() as u8) + } } #[inline] pub fn set_minYield(&mut self, val: ::std::os::raw::c_uchar) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_2.set(0usize, 4u8, val as u64) + self._bitfield_2.set_const::<0usize, 4u8>(val as u64) } } #[inline] @@ -497,7 +777,10 @@ impl A { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_2), 0usize, 4u8) as u8, + >>::raw_get_const::< + 0usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_2)) as u8, ) } } @@ -507,23 +790,23 @@ impl A { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_2), + >>::raw_set_const::< 0usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_2), val as u64) } } #[inline] pub fn waterBonus(&self) -> ::std::os::raw::c_uchar { - unsafe { ::std::mem::transmute(self._bitfield_2.get(4usize, 4u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_2.get_const::<4usize, 4u8>() as u8) + } } #[inline] pub fn set_waterBonus(&mut self, val: ::std::os::raw::c_uchar) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_2.set(4usize, 4u8, val as u64) + self._bitfield_2.set_const::<4usize, 4u8>(val as u64) } } #[inline] @@ -532,7 +815,10 @@ impl A { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_2), 4usize, 4u8) as u8, + >>::raw_get_const::< + 4usize, + 4u8, + 
>(::std::ptr::addr_of!((*this)._bitfield_2)) as u8, ) } } @@ -542,12 +828,10 @@ impl A { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_2), + >>::raw_set_const::< 4usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_2), val as u64) } } #[inline] @@ -557,23 +841,21 @@ impl A { ) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 4u8, - { - let minYield: u8 = unsafe { ::std::mem::transmute(minYield) }; - minYield as u64 - }, - ); + >({ + let minYield: u8 = unsafe { ::std::mem::transmute(minYield) }; + minYield as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 4usize, 4u8, - { - let waterBonus: u8 = unsafe { ::std::mem::transmute(waterBonus) }; - waterBonus as u64 - }, - ); + >({ + let waterBonus: u8 = unsafe { ::std::mem::transmute(waterBonus) }; + waterBonus as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/bitfield_pragma_packed.rs b/bindgen-tests/tests/expectations/tests/bitfield_pragma_packed.rs index 54186cb33e..fd16e0d41d 100644 --- a/bindgen-tests/tests/expectations/tests/bitfield_pragma_packed.rs +++ b/bindgen-tests/tests/expectations/tests/bitfield_pragma_packed.rs @@ -217,6 +217,277 @@ where } } } +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl<const N: usize> __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const fn get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize.
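+    ///
+    /// Sketch (hypothetical layout): the masked read-modify-write only
+    /// rewrites the bits of the target field, leaving its neighbours
+    /// intact.
+    /// ```ignore
+    /// let mut unit = __BindgenBitfieldUnit::new([0xFFu8]);
+    /// unit.set_const::<0usize, 4u8>(0b0101);
+    /// assert_eq!(unit.get_const::<0usize, 4u8>(), 0b0101);
+    /// assert_eq!(unit.get_const::<4usize, 4u8>(), 0b1111); // untouched
+    /// ```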
+ #[inline] + pub fn set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize.
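+    ///
+    /// Sketch (hypothetical use): reading a field through a raw pointer
+    /// without first creating a reference to the containing struct.
+    /// ```ignore
+    /// let unit = __BindgenBitfieldUnit::new([0xFFu8, 0x00]);
+    /// let p: *const __BindgenBitfieldUnit<[u8; 2]> = &unit;
+    /// let v = unsafe {
+    ///     <__BindgenBitfieldUnit<[u8; 2]>>::raw_get_const::<0usize, 4u8>(p)
+    /// };
+    /// assert_eq!(v, 0xF);
+    /// ```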
+ #[inline] + pub const unsafe fn raw_get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize.
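+    ///
+    /// Sketch (hypothetical use), the write-side counterpart of
+    /// `raw_get_const`.
+    /// ```ignore
+    /// let mut unit = __BindgenBitfieldUnit::new([0u8; 2]);
+    /// let p: *mut __BindgenBitfieldUnit<[u8; 2]> = &mut unit;
+    /// unsafe {
+    ///     <__BindgenBitfieldUnit<[u8; 2]>>::raw_set_const::<4usize, 8u8>(p, 0xA5)
+    /// };
+    /// assert_eq!(unit.get_const::<4usize, 8u8>(), 0xA5);
+    /// ```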
+ #[inline] + pub unsafe fn raw_set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::<u8>(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } + } +}
#[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct Struct { @@ -230,13 +501,15 @@ const _: () = { impl Struct { #[inline] pub fn a(&self) -> ::std::os::raw::c_uchar { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u8) + } } #[inline] pub fn set_a(&mut self, val: ::std::os::raw::c_uchar) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -245,7 +518,10 @@ impl Struct { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) as u8, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -255,23 +531,23 @@ impl Struct { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b(&self) -> ::std::os::raw::c_uchar { - unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 1u8>() as u8) + } } #[inline] pub fn set_b(&mut self, val: 
::std::os::raw::c_uchar) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(1usize, 1u8, val as u64) + self._bitfield_1.set_const::<1usize, 1u8>(val as u64) } } #[inline] @@ -280,7 +556,10 @@ impl Struct { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 1u8) as u8, + >>::raw_get_const::< + 1usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -290,23 +569,23 @@ impl Struct { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn c(&self) -> ::std::os::raw::c_uchar { - unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 6u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<2usize, 6u8>() as u8) + } } #[inline] pub fn set_c(&mut self, val: ::std::os::raw::c_uchar) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(2usize, 6u8, val as u64) + self._bitfield_1.set_const::<2usize, 6u8>(val as u64) } } #[inline] @@ -315,7 +594,10 @@ impl Struct { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 2usize, 6u8) as u8, + >>::raw_get_const::< + 2usize, + 6u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -325,23 +607,23 @@ impl Struct { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 2usize, 6u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn d(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(8usize, 16u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<8usize, 16u8>() as u16) + } } #[inline] pub fn set_d(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(8usize, 16u8, val as u64) + self._bitfield_1.set_const::<8usize, 16u8>(val as u64) } } #[inline] @@ -350,8 +632,10 @@ impl Struct { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 8usize, 16u8) - as u16, + >>::raw_get_const::< + 8usize, + 16u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -361,23 +645,23 @@ impl Struct { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 8usize, 16u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn e(&self) -> ::std::os::raw::c_uchar { - unsafe { ::std::mem::transmute(self._bitfield_1.get(24usize, 8u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<24usize, 8u8>() as u8) + } } #[inline] pub fn set_e(&mut self, val: ::std::os::raw::c_uchar) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(24usize, 8u8, val as u64) + self._bitfield_1.set_const::<24usize, 8u8>(val as u64) } } #[inline] @@ -386,8 +670,10 @@ impl Struct { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 24usize, 8u8) - as u8, + >>::raw_get_const::< + 24usize, + 8u8, + 
>(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -397,12 +683,10 @@ impl Struct { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 24usize, 8u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -415,50 +699,45 @@ impl Struct { ) -> __BindgenBitfieldUnit<[u8; 4usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let a: u8 = unsafe { ::std::mem::transmute(a) }; - a as u64 - }, - ); + >({ + let a: u8 = unsafe { ::std::mem::transmute(a) }; + a as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 1u8, - { - let b: u8 = unsafe { ::std::mem::transmute(b) }; - b as u64 - }, - ); + >({ + let b: u8 = unsafe { ::std::mem::transmute(b) }; + b as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 2usize, 6u8, - { - let c: u8 = unsafe { ::std::mem::transmute(c) }; - c as u64 - }, - ); + >({ + let c: u8 = unsafe { ::std::mem::transmute(c) }; + c as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 8usize, 16u8, - { - let d: u16 = unsafe { ::std::mem::transmute(d) }; - d as u64 - }, - ); + >({ + let d: u16 = unsafe { ::std::mem::transmute(d) }; + d as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 24usize, 8u8, - { - let e: u8 = unsafe { ::std::mem::transmute(e) }; - e as u64 - }, - ); + >({ + let e: u8 = unsafe { ::std::mem::transmute(e) }; + e as u64 + }); __bindgen_bitfield_unit } } @@ -476,13 +755,15 @@ const _: () = { impl Inner { #[inline] pub fn a(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 16u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 16u8>() as u16) + } } #[inline] pub fn set_a(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 16u8, val as u64) + self._bitfield_1.set_const::<0usize, 16u8>(val as u64) } } #[inline] @@ -491,8 +772,10 @@ impl Inner { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 16u8) - as u16, + >>::raw_get_const::< + 0usize, + 16u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -502,23 +785,23 @@ impl Inner { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 16u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(16usize, 16u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<16usize, 16u8>() as u16) + } } #[inline] pub fn set_b(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(16usize, 16u8, val as u64) + self._bitfield_1.set_const::<16usize, 16u8>(val as u64) } } #[inline] @@ -527,8 +810,10 @@ impl Inner { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 16usize, 16u8) - as u16, + >>::raw_get_const::< + 16usize, + 16u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -538,12 +823,10 @@ impl Inner { let val: u16 = ::std::mem::transmute(val); 
<__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 16usize, 16u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -553,23 +836,21 @@ impl Inner { ) -> __BindgenBitfieldUnit<[u8; 4usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 16u8, - { - let a: u16 = unsafe { ::std::mem::transmute(a) }; - a as u64 - }, - ); + >({ + let a: u16 = unsafe { ::std::mem::transmute(a) }; + a as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 16usize, 16u8, - { - let b: u16 = unsafe { ::std::mem::transmute(b) }; - b as u64 - }, - ); + >({ + let b: u16 = unsafe { ::std::mem::transmute(b) }; + b as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/blocklist_bitfield_unit.rs b/bindgen-tests/tests/expectations/tests/blocklist_bitfield_unit.rs index 77c263e3cc..91d34b9a64 100644 --- a/bindgen-tests/tests/expectations/tests/blocklist_bitfield_unit.rs +++ b/bindgen-tests/tests/expectations/tests/blocklist_bitfield_unit.rs @@ -19,13 +19,15 @@ const _: () = { impl C { #[inline] pub fn b1(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u32) + } } #[inline] pub fn set_b1(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -34,8 +36,10 @@ impl C { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) - as u32, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -45,23 +49,23 @@ impl C { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b2(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 1u8>() as u32) + } } #[inline] pub fn set_b2(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(1usize, 1u8, val as u64) + self._bitfield_1.set_const::<1usize, 1u8>(val as u64) } } #[inline] @@ -70,8 +74,10 @@ impl C { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 1u8) - as u32, + >>::raw_get_const::< + 1usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -81,12 +87,10 @@ impl C { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -96,23 +100,21 @@ impl C { ) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let b1: u32 = unsafe { 
::std::mem::transmute(b1) }; - b1 as u64 - }, - ); + >({ + let b1: u32 = unsafe { ::std::mem::transmute(b1) }; + b1 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 1u8, - { - let b2: u32 = unsafe { ::std::mem::transmute(b2) }; - b2 as u64 - }, - ); + >({ + let b2: u32 = unsafe { ::std::mem::transmute(b2) }; + b2 as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/default_visibility_crate.rs b/bindgen-tests/tests/expectations/tests/default_visibility_crate.rs index 68f26e5f3b..4525a07711 100644 --- a/bindgen-tests/tests/expectations/tests/default_visibility_crate.rs +++ b/bindgen-tests/tests/expectations/tests/default_visibility_crate.rs @@ -217,6 +217,277 @@ where } } } +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
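+    /// # Example
+    ///
+    /// A minimal usage sketch; it assumes the `new` constructor from the
+    /// bitfield-unit preamble. The set/get round trip holds on either
+    /// endianness, since both methods share one bit-numbering convention.
+    ///
+    /// ```ignore
+    /// let mut unit = __BindgenBitfieldUnit::new([0u8; 2]);
+    /// // Store a 6-bit value at bit offset 2, then read it back.
+    /// unit.set_const::<2usize, 6u8>(0b11_1101);
+    /// assert_eq!(unit.get_const::<2usize, 6u8>(), 0b11_1101);
+    /// ```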
+ #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
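+    /// # Example
+    ///
+    /// A minimal usage sketch; it assumes the `new` constructor from the
+    /// bitfield-unit preamble and, for the byte value shown, a little-endian
+    /// target.
+    ///
+    /// ```ignore
+    /// let unit = __BindgenBitfieldUnit::new([0b0000_0101u8]);
+    /// // SAFETY: the pointer refers to a live, initialized unit.
+    /// let v = unsafe {
+    ///     <__BindgenBitfieldUnit<
+    ///         [u8; 1],
+    ///     >>::raw_get_const::<0usize, 3u8>(core::ptr::addr_of!(unit))
+    /// };
+    /// assert_eq!(v, 0b101);
+    /// ```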
+ #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
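+    /// # Example
+    ///
+    /// A minimal usage sketch; it assumes the `new` constructor from the
+    /// bitfield-unit preamble. The raw set/get round trip holds on either
+    /// endianness, since both methods share one bit-numbering convention.
+    ///
+    /// ```ignore
+    /// let mut unit = __BindgenBitfieldUnit::new([0u8; 1]);
+    /// // SAFETY: the pointer refers to a live unit we are allowed to mutate.
+    /// unsafe {
+    ///     <__BindgenBitfieldUnit<
+    ///         [u8; 1],
+    ///     >>::raw_set_const::<0usize, 3u8>(core::ptr::addr_of_mut!(unit), 0b101)
+    /// };
+    /// assert_eq!(unit.get_const::<0usize, 3u8>(), 0b101);
+    /// ```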
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } + } +} #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct Point { @@ -231,13 +502,15 @@ pub struct Color { impl Color { #[inline] pub(crate) fn r(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u8) + } } #[inline] pub(crate) fn set_r(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -246,7 +519,10 @@ impl Color { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) as u8, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -256,23 +532,23 @@ impl Color { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub(crate) fn g(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 1u8>() as u8) + } } #[inline] pub(crate) fn set_g(&mut 
self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(1usize, 1u8, val as u64) + self._bitfield_1.set_const::<1usize, 1u8>(val as u64) } } #[inline] @@ -281,7 +557,10 @@ impl Color { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 1u8) as u8, + >>::raw_get_const::< + 1usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -291,23 +570,23 @@ impl Color { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub(crate) fn b(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<2usize, 1u8>() as u8) + } } #[inline] pub(crate) fn set_b(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(2usize, 1u8, val as u64) + self._bitfield_1.set_const::<2usize, 1u8>(val as u64) } } #[inline] @@ -316,7 +595,10 @@ impl Color { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 2usize, 1u8) as u8, + >>::raw_get_const::< + 2usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -326,12 +608,10 @@ impl Color { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 2usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -342,32 +622,29 @@ impl Color { ) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let r: u8 = unsafe { ::std::mem::transmute(r) }; - r as u64 - }, - ); + >({ + let r: u8 = unsafe { ::std::mem::transmute(r) }; + r as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 1u8, - { - let g: u8 = unsafe { ::std::mem::transmute(g) }; - g as u64 - }, - ); + >({ + let g: u8 = unsafe { ::std::mem::transmute(g) }; + g as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 2usize, 1u8, - { - let b: u8 = unsafe { ::std::mem::transmute(b) }; - b as u64 - }, - ); + >({ + let b: u8 = unsafe { ::std::mem::transmute(b) }; + b as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/default_visibility_private.rs b/bindgen-tests/tests/expectations/tests/default_visibility_private.rs index 7397db3baf..646eadfed1 100644 --- a/bindgen-tests/tests/expectations/tests/default_visibility_private.rs +++ b/bindgen-tests/tests/expectations/tests/default_visibility_private.rs @@ -217,6 +217,277 @@ where } } } +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
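+    /// # Example
+    ///
+    /// A minimal usage sketch; it assumes the `new` constructor from the
+    /// bitfield-unit preamble and, for the byte value shown, a little-endian
+    /// target.
+    ///
+    /// ```ignore
+    /// // A 6-bit field starting at bit 2 of a one-byte unit.
+    /// let unit = __BindgenBitfieldUnit::new([0b1111_0100u8]);
+    /// assert_eq!(unit.get_const::<2usize, 6u8>(), 0b11_1101);
+    /// ```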
+ #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | 
(byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
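+    /// # Example
+    ///
+    /// A minimal usage sketch; it assumes the `new` constructor from the
+    /// bitfield-unit preamble. The raw set/get round trip holds on either
+    /// endianness, since both methods share one bit-numbering convention.
+    ///
+    /// ```ignore
+    /// let mut unit = __BindgenBitfieldUnit::new([0u8; 1]);
+    /// // SAFETY: the pointer refers to a live unit we are allowed to mutate.
+    /// unsafe {
+    ///     <__BindgenBitfieldUnit<
+    ///         [u8; 1],
+    ///     >>::raw_set_const::<0usize, 3u8>(core::ptr::addr_of_mut!(unit), 0b101)
+    /// };
+    /// assert_eq!(unit.get_const::<0usize, 3u8>(), 0b101);
+    /// ```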
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } + } +} #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct Point { @@ -231,13 +502,15 @@ pub struct Color { impl Color { #[inline] fn r(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u8) + } } #[inline] fn set_r(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -246,7 +519,10 @@ impl Color { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) as u8, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -256,23 +532,23 @@ impl Color { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] fn g(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 1u8>() as u8) + } } #[inline] fn set_g(&mut self, val: ::std::os::raw::c_char) { unsafe 
{ let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(1usize, 1u8, val as u64) + self._bitfield_1.set_const::<1usize, 1u8>(val as u64) } } #[inline] @@ -281,7 +557,10 @@ impl Color { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 1u8) as u8, + >>::raw_get_const::< + 1usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -291,23 +570,23 @@ impl Color { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] fn b(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<2usize, 1u8>() as u8) + } } #[inline] fn set_b(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(2usize, 1u8, val as u64) + self._bitfield_1.set_const::<2usize, 1u8>(val as u64) } } #[inline] @@ -316,7 +595,10 @@ impl Color { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 2usize, 1u8) as u8, + >>::raw_get_const::< + 2usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -326,12 +608,10 @@ impl Color { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 2usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -342,32 +622,29 @@ impl Color { ) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let r: u8 = unsafe { ::std::mem::transmute(r) }; - r as u64 - }, - ); + >({ + let r: u8 = unsafe { ::std::mem::transmute(r) }; + r as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 1u8, - { - let g: u8 = unsafe { ::std::mem::transmute(g) }; - g as u64 - }, - ); + >({ + let g: u8 = unsafe { ::std::mem::transmute(g) }; + g as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 2usize, 1u8, - { - let b: u8 = unsafe { ::std::mem::transmute(b) }; - b as u64 - }, - ); + >({ + let b: u8 = unsafe { ::std::mem::transmute(b) }; + b as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/default_visibility_private_respects_cxx_access_spec.rs b/bindgen-tests/tests/expectations/tests/default_visibility_private_respects_cxx_access_spec.rs index 4e4d8f1d6c..ba8ff7d004 100644 --- a/bindgen-tests/tests/expectations/tests/default_visibility_private_respects_cxx_access_spec.rs +++ b/bindgen-tests/tests/expectations/tests/default_visibility_private_respects_cxx_access_spec.rs @@ -217,6 +217,277 @@ where } } } +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
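+    /// # Example
+    ///
+    /// A minimal usage sketch; it assumes the `new` constructor from the
+    /// bitfield-unit preamble and, for the byte value shown, a little-endian
+    /// target.
+    ///
+    /// ```ignore
+    /// // A 6-bit field starting at bit 2 of a one-byte unit.
+    /// let unit = __BindgenBitfieldUnit::new([0b1111_0100u8]);
+    /// assert_eq!(unit.get_const::<2usize, 6u8>(), 0b11_1101);
+    /// ```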
+ #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | 
(byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
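+    /// # Example
+    ///
+    /// A minimal usage sketch; it assumes the `new` constructor from the
+    /// bitfield-unit preamble. The raw set/get round trip holds on either
+    /// endianness, since both methods share one bit-numbering convention.
+    ///
+    /// ```ignore
+    /// let mut unit = __BindgenBitfieldUnit::new([0u8; 1]);
+    /// // SAFETY: the pointer refers to a live unit we are allowed to mutate.
+    /// unsafe {
+    ///     <__BindgenBitfieldUnit<
+    ///         [u8; 1],
+    ///     >>::raw_set_const::<0usize, 3u8>(core::ptr::addr_of_mut!(unit), 0b101)
+    /// };
+    /// assert_eq!(unit.get_const::<0usize, 3u8>(), 0b101);
+    /// ```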
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } + } +} #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct Point { @@ -231,13 +502,15 @@ pub struct Color { impl Color { #[inline] pub fn r(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u8) + } } #[inline] pub fn set_r(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -246,7 +519,10 @@ impl Color { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) as u8, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -256,23 +532,23 @@ impl Color { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn g(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 1u8>() as u8) + } } #[inline] pub fn set_g(&mut self, val: 
::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(1usize, 1u8, val as u64) + self._bitfield_1.set_const::<1usize, 1u8>(val as u64) } } #[inline] @@ -281,7 +557,10 @@ impl Color { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 1u8) as u8, + >>::raw_get_const::< + 1usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -291,23 +570,23 @@ impl Color { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<2usize, 1u8>() as u8) + } } #[inline] pub fn set_b(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(2usize, 1u8, val as u64) + self._bitfield_1.set_const::<2usize, 1u8>(val as u64) } } #[inline] @@ -316,7 +595,10 @@ impl Color { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 2usize, 1u8) as u8, + >>::raw_get_const::< + 2usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -326,12 +608,10 @@ impl Color { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 2usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -342,32 +622,29 @@ impl Color { ) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let r: u8 = unsafe { ::std::mem::transmute(r) }; - r as u64 - }, - ); + >({ + let r: u8 = unsafe { ::std::mem::transmute(r) }; + r as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 1u8, - { - let g: u8 = unsafe { ::std::mem::transmute(g) }; - g as u64 - }, - ); + >({ + let g: u8 = unsafe { ::std::mem::transmute(g) }; + g as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 2usize, 1u8, - { - let b: u8 = unsafe { ::std::mem::transmute(b) }; - b as u64 - }, - ); + >({ + let b: u8 = unsafe { ::std::mem::transmute(b) }; + b as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/derive-bitfield-method-same-name.rs b/bindgen-tests/tests/expectations/tests/derive-bitfield-method-same-name.rs index 2d41f391c2..7b291fa26f 100644 --- a/bindgen-tests/tests/expectations/tests/derive-bitfield-method-same-name.rs +++ b/bindgen-tests/tests/expectations/tests/derive-bitfield-method-same-name.rs @@ -217,6 +217,277 @@ where } } } +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
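+    /// # Example
+    ///
+    /// A minimal usage sketch; it assumes the `new` constructor from the
+    /// bitfield-unit preamble and, for the byte value shown, a little-endian
+    /// target.
+    ///
+    /// ```ignore
+    /// // A 6-bit field starting at bit 2 of a one-byte unit.
+    /// let unit = __BindgenBitfieldUnit::new([0b1111_0100u8]);
+    /// assert_eq!(unit.get_const::<2usize, 6u8>(), 0b11_1101);
+    /// ```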
+ #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | 
(byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
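+    /// # Example
+    ///
+    /// A minimal usage sketch; it assumes the `new` constructor from the
+    /// bitfield-unit preamble. The raw set/get round trip holds on either
+    /// endianness, since both methods share one bit-numbering convention.
+    ///
+    /// ```ignore
+    /// let mut unit = __BindgenBitfieldUnit::new([0u8; 1]);
+    /// // SAFETY: the pointer refers to a live unit we are allowed to mutate.
+    /// unsafe {
+    ///     <__BindgenBitfieldUnit<
+    ///         [u8; 1],
+    ///     >>::raw_set_const::<0usize, 3u8>(core::ptr::addr_of_mut!(unit), 0b101)
+    /// };
+    /// assert_eq!(unit.get_const::<0usize, 3u8>(), 0b101);
+    /// ```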
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } + } +} /** Because this struct have array larger than 32 items and --with-derive-partialeq --impl-partialeq --impl-debug is provided, this struct should manually implement `Debug` and `PartialEq`.*/ @@ -257,13 +528,15 @@ impl Default for Foo { impl Foo { #[inline] pub fn type__bindgen_bitfield(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 3u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 3u8>() as u8) + } } #[inline] pub fn set_type__bindgen_bitfield(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 3u8, val as u64) + self._bitfield_1.set_const::<0usize, 3u8>(val as u64) } } #[inline] @@ -274,7 +547,10 @@ impl Foo { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 3u8) as u8, + >>::raw_get_const::< + 0usize, + 3u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -287,12 +563,10 @@ impl Foo { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 3u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -301,16 +575,15 @@ impl Foo { ) -> __BindgenBitfieldUnit<[u8; 2usize]> { let mut 
__bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 2usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 3u8, - { - let type__bindgen_bitfield: u8 = unsafe { - ::std::mem::transmute(type__bindgen_bitfield) - }; - type__bindgen_bitfield as u64 - }, - ); + >({ + let type__bindgen_bitfield: u8 = unsafe { + ::std::mem::transmute(type__bindgen_bitfield) + }; + type__bindgen_bitfield as u64 + }); __bindgen_bitfield_unit } #[inline] diff --git a/bindgen-tests/tests/expectations/tests/derive-debug-bitfield-1-51.rs b/bindgen-tests/tests/expectations/tests/derive-debug-bitfield-1-51.rs index f88689d284..33752b15fa 100644 --- a/bindgen-tests/tests/expectations/tests/derive-debug-bitfield-1-51.rs +++ b/bindgen-tests/tests/expectations/tests/derive-debug-bitfield-1-51.rs @@ -217,6 +217,277 @@ where } } } +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
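+    /// # Example
+    ///
+    /// A minimal usage sketch; it assumes the `new` constructor from the
+    /// bitfield-unit preamble. The set/get round trip holds on either
+    /// endianness, since both methods share one bit-numbering convention.
+    ///
+    /// ```ignore
+    /// let mut unit = __BindgenBitfieldUnit::new([0u8; 2]);
+    /// // Store a 6-bit value at bit offset 2, then read it back.
+    /// unit.set_const::<2usize, 6u8>(0b11_1101);
+    /// assert_eq!(unit.get_const::<2usize, 6u8>(), 0b11_1101);
+    /// ```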
+    /// Set a field using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub fn set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&mut self, val: u64) {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = val as usize;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift;
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                if cfg!(target_endian = "big") {
+                    let byte = self.storage[start_byte + i].reverse_bits();
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    self.storage[start_byte + i] = new_byte.reverse_bits();
+                } else {
+                    self.storage[start_byte + i] = (self.storage[start_byte + i]
+                        & !byte_mask) | (byte_val & byte_mask);
+                }
+                i += 1;
+            }
+        } else {
+            let mut val = val;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 {
+                !0u64 << bit_shift
+            } else {
+                ((1u64 << BIT_WIDTH) - 1) << bit_shift
+            };
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                if cfg!(target_endian = "big") {
+                    let byte = self.storage[start_byte + i].reverse_bits();
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    self.storage[start_byte + i] = new_byte.reverse_bits();
+                } else {
+                    self.storage[start_byte + i] = (self.storage[start_byte + i]
+                        & !byte_mask) | (byte_val & byte_mask);
+                }
+                i += 1;
+            }
+        }
+    }
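// Editor's aside, not part of the patch: set_const is a read-modify-write per
// byte. field_mask selects the bits that belong to the field; the special case
// below mirrors the `>= 64` branch above, since for BIT_WIDTH + bit_shift = 64
// the natural `((1u64 << 64) - 1) << shift` would overflow the shift amount.
fn field_mask(bit_width: u32, bit_shift: u32) -> u64 {
    if bit_width + bit_shift >= 64 {
        !0u64 << bit_shift // e.g. width 60, shift 4: selects bits 4..=63
    } else {
        ((1u64 << bit_width) - 1) << bit_shift // e.g. width 7, shift 3: bits 3..=9
    }
}
// Each touched byte is then updated as (old & !mask_byte) | (new & mask_byte),
// leaving neighboring fields stored in the same bytes untouched.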
+    /// Raw pointer get using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub const unsafe fn raw_get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        this: *const Self,
+    ) -> u64 {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return 0;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 };
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = 0usize;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte.reverse_bits() as usize) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte as usize) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val as u64
+        } else {
+            let mut val = 0u64;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte.reverse_bits() as u64) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte as u64) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val
+        }
+    }
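// Editor's aside, not part of the patch: the raw_* variants read and write
// through raw pointers only, so no &/&mut reference to the unit is ever
// materialized. A hypothetical caller (names illustrative):
//
//     unsafe fn read_flags(p: *const Foo) -> u64 {
//         <__BindgenBitfieldUnit<[u8; 2usize]>>::raw_get_const::<0usize, 3u8>(
//             ::std::ptr::addr_of!((*p)._bitfield_1),
//         )
//     }
//
// This matters for packed structs, where creating a reference to a misaligned
// field would be undefined behavior.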
+    /// Raw pointer set using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub unsafe fn raw_set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        this: *mut Self,
+        val: u64,
+    ) {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        let storage_ptr = this.cast::<[u8; N]>().cast::<u8>();
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = val as usize;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift;
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                let byte_ptr = unsafe { storage_ptr.add(start_byte + i) };
+                if cfg!(target_endian = "big") {
+                    let byte = unsafe { (*byte_ptr).reverse_bits() };
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    unsafe { *byte_ptr = new_byte.reverse_bits() };
+                } else {
+                    unsafe {
+                        *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask)
+                    };
+                }
+                i += 1;
+            }
+        } else {
+            let mut val = val;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 {
+                !0u64 << bit_shift
+            } else {
+                ((1u64 << BIT_WIDTH) - 1) << bit_shift
+            };
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                let byte_ptr = unsafe { storage_ptr.add(start_byte + i) };
+                if cfg!(target_endian = "big") {
+                    let byte = unsafe { (*byte_ptr).reverse_bits() };
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    unsafe { *byte_ptr = new_byte.reverse_bits() };
+                } else {
+                    unsafe {
+                        *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask)
+                    };
+                }
+                i += 1;
+            }
+        }
+    }
+}
 #[repr(C)]
 #[derive(Debug, Copy, Clone)]
 pub struct C {
@@ -241,13 +512,15 @@ impl Default for C {
 impl C {
     #[inline]
     pub fn a(&self) -> bool {
-        unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u8) }
+        unsafe {
+            ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u8)
+        }
     }
     #[inline]
     pub fn set_a(&mut self, val: bool) {
         unsafe {
             let val: u8 = ::std::mem::transmute(val);
-            self._bitfield_1.set(0usize, 1u8, val as u64)
+            self._bitfield_1.set_const::<0usize, 1u8>(val as u64)
         }
     }
     #[inline]
@@ -256,7 +529,10 @@ impl C {
             ::std::mem::transmute(
                 <__BindgenBitfieldUnit<
                     [u8; 1usize],
-                >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) as u8,
+                >>::raw_get_const::<
+                    0usize,
+                    1u8,
+                >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8,
             )
         }
     }
@@ -266,23 +542,23 @@ impl C {
             let val: u8 = ::std::mem::transmute(val);
             <__BindgenBitfieldUnit<
                 [u8; 1usize],
-            >>::raw_set(
-                ::std::ptr::addr_of_mut!((*this)._bitfield_1),
+            >>::raw_set_const::<
                 0usize,
                 1u8,
-                val as u64,
-            )
+            >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64)
         }
     }
     #[inline]
     pub fn b(&self) -> bool {
-        unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 7u8) as u8) }
+        unsafe {
+            ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 7u8>() as u8)
+        }
     }
     #[inline]
     pub fn set_b(&mut self, val: bool) {
         unsafe {
             let val: u8 = ::std::mem::transmute(val);
-            self._bitfield_1.set(1usize,
7u8, val as u64) + self._bitfield_1.set_const::<1usize, 7u8>(val as u64) } } #[inline] @@ -291,7 +567,10 @@ impl C { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 7u8) as u8, + >>::raw_get_const::< + 1usize, + 7u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -301,35 +580,31 @@ impl C { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 7u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn new_bitfield_1(a: bool, b: bool) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let a: u8 = unsafe { ::std::mem::transmute(a) }; - a as u64 - }, - ); + >({ + let a: u8 = unsafe { ::std::mem::transmute(a) }; + a as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 7u8, - { - let b: u8 = unsafe { ::std::mem::transmute(b) }; - b as u64 - }, - ); + >({ + let b: u8 = unsafe { ::std::mem::transmute(b) }; + b as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/derive-debug-bitfield-core.rs b/bindgen-tests/tests/expectations/tests/derive-debug-bitfield-core.rs index ba6c38706a..e21a809b88 100644 --- a/bindgen-tests/tests/expectations/tests/derive-debug-bitfield-core.rs +++ b/bindgen-tests/tests/expectations/tests/derive-debug-bitfield-core.rs @@ -218,6 +218,277 @@ where } } } +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
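// Editor's worked example, not part of the patch: on big-endian targets the
// loops above byte-reverse bits so the little-endian algorithm can be reused.
// Take one byte 0b1100_0000 holding a 3-bit field at bit offset 0 (MSB-first,
// field bits "110" = 6):
//   byte.reverse_bits()              -> 0b0000_0011 (field moved to the LSB end)
//   >> bit_shift (0), masked to 3    -> 0b011
//   val.reverse_bits() >> (64 - 3)   -> 0b110 = 6, the field value.
// The per-byte reversal aligns bit numbering with the little-endian path; the
// final reversal restores the value's own bit order.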
+ #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
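// Editor's sketch of the intended invariant, not part of the patch (assumes
// the unit's `new` constructor from earlier in this file):
//
//     let mut unit = __BindgenBitfieldUnit::new([0u8; 4]);
//     unit.set_const::<5usize, 11u8>(0x7ff);
//     assert_eq!(unit.get_const::<5usize, 11u8>(), 0x7ff);
//     // Oversized values are masked to the field width, as in the runtime set():
//     unit.set_const::<5usize, 11u8>(0xffff);
//     assert_eq!(unit.get_const::<5usize, 11u8>(), 0x7ff);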
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } + } +} #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct C { @@ -244,13 +515,15 @@ impl Default for C { impl C { #[inline] pub fn a(&self) -> bool { - unsafe { ::core::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u8) } + unsafe { + ::core::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u8) + } } #[inline] pub fn set_a(&mut self, val: bool) { unsafe { let val: u8 = ::core::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -259,8 +532,10 @@ impl C { ::core::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::core::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) - as u8, + >>::raw_get_const::< + 0usize, + 1u8, + >(::core::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -270,23 +545,23 @@ impl C { let val: u8 = ::core::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::core::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::core::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b(&self) -> bool { - unsafe { ::core::mem::transmute(self._bitfield_1.get(1usize, 7u8) as u8) } + unsafe { + ::core::mem::transmute(self._bitfield_1.get_const::<1usize, 7u8>() as u8) + } } #[inline] pub fn set_b(&mut self, val: bool) { unsafe { let val: u8 = ::core::mem::transmute(val); - 
self._bitfield_1.set(1usize, 7u8, val as u64) + self._bitfield_1.set_const::<1usize, 7u8>(val as u64) } } #[inline] @@ -295,8 +570,10 @@ impl C { ::core::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::core::ptr::addr_of!((*this)._bitfield_1), 1usize, 7u8) - as u8, + >>::raw_get_const::< + 1usize, + 7u8, + >(::core::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -306,35 +583,31 @@ impl C { let val: u8 = ::core::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::core::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 7u8, - val as u64, - ) + >(::core::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn new_bitfield_1(a: bool, b: bool) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let a: u8 = unsafe { ::core::mem::transmute(a) }; - a as u64 - }, - ); + >({ + let a: u8 = unsafe { ::core::mem::transmute(a) }; + a as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 7u8, - { - let b: u8 = unsafe { ::core::mem::transmute(b) }; - b as u64 - }, - ); + >({ + let b: u8 = unsafe { ::core::mem::transmute(b) }; + b as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/derive-debug-bitfield.rs b/bindgen-tests/tests/expectations/tests/derive-debug-bitfield.rs index f88689d284..33752b15fa 100644 --- a/bindgen-tests/tests/expectations/tests/derive-debug-bitfield.rs +++ b/bindgen-tests/tests/expectations/tests/derive-debug-bitfield.rs @@ -217,6 +217,277 @@ where } } } +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. 
+ /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } + } +} #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct C { @@ -241,13 +512,15 @@ impl Default for C { impl C { #[inline] pub fn a(&self) -> bool { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u8) + } } #[inline] pub fn set_a(&mut self, val: bool) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -256,7 +529,10 @@ impl C { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) as u8, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -266,23 +542,23 @@ impl C { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b(&self) -> bool { - unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 7u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 7u8>() as u8) + } } #[inline] pub fn set_b(&mut self, val: bool) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(1usize, 
7u8, val as u64) + self._bitfield_1.set_const::<1usize, 7u8>(val as u64) } } #[inline] @@ -291,7 +567,10 @@ impl C { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 7u8) as u8, + >>::raw_get_const::< + 1usize, + 7u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -301,35 +580,31 @@ impl C { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 7u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn new_bitfield_1(a: bool, b: bool) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let a: u8 = unsafe { ::std::mem::transmute(a) }; - a as u64 - }, - ); + >({ + let a: u8 = unsafe { ::std::mem::transmute(a) }; + a as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 7u8, - { - let b: u8 = unsafe { ::std::mem::transmute(b) }; - b as u64 - }, - ); + >({ + let b: u8 = unsafe { ::std::mem::transmute(b) }; + b as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/derive-partialeq-bitfield.rs b/bindgen-tests/tests/expectations/tests/derive-partialeq-bitfield.rs index 3375049d91..47d6808474 100644 --- a/bindgen-tests/tests/expectations/tests/derive-partialeq-bitfield.rs +++ b/bindgen-tests/tests/expectations/tests/derive-partialeq-bitfield.rs @@ -217,6 +217,277 @@ where } } } +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
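// Editor's usage note, not part of the patch: the generated accessors below
// simply fix the offset and width as const arguments, e.g. for struct C in
// this file:
//
//     let mut c = C::default();
//     c.set_b(true);   // forwards to _bitfield_1.set_const::<1usize, 7u8>(1)
//     assert!(c.b());  // forwards to _bitfield_1.get_const::<1usize, 7u8>()
//
// so the compiler sees fixed shifts and masks instead of loop-carried bit
// indices.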
+ #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } + } +} #[repr(C)] #[derive(Debug, Copy, Clone, PartialEq)] pub struct C { @@ -241,13 +512,15 @@ impl Default for C { impl C { #[inline] pub fn a(&self) -> bool { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u8) + } } #[inline] pub fn set_a(&mut self, val: bool) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -256,7 +529,10 @@ impl C { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) as u8, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -266,23 +542,23 @@ impl C { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b(&self) -> bool { - unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 7u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 7u8>() as u8) + } } #[inline] pub fn set_b(&mut self, val: bool) { unsafe { let val: u8 = ::std::mem::transmute(val); - 
self._bitfield_1.set(1usize, 7u8, val as u64) + self._bitfield_1.set_const::<1usize, 7u8>(val as u64) } } #[inline] @@ -291,7 +567,10 @@ impl C { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 7u8) as u8, + >>::raw_get_const::< + 1usize, + 7u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -301,35 +580,31 @@ impl C { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 7u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn new_bitfield_1(a: bool, b: bool) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let a: u8 = unsafe { ::std::mem::transmute(a) }; - a as u64 - }, - ); + >({ + let a: u8 = unsafe { ::std::mem::transmute(a) }; + a as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 7u8, - { - let b: u8 = unsafe { ::std::mem::transmute(b) }; - b as u64 - }, - ); + >({ + let b: u8 = unsafe { ::std::mem::transmute(b) }; + b as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/divide-by-zero-in-struct-layout.rs b/bindgen-tests/tests/expectations/tests/divide-by-zero-in-struct-layout.rs index b7fc127f78..c1a2029373 100644 --- a/bindgen-tests/tests/expectations/tests/divide-by-zero-in-struct-layout.rs +++ b/bindgen-tests/tests/expectations/tests/divide-by-zero-in-struct-layout.rs @@ -217,6 +217,277 @@ where } } } +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. 
+ /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
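// Editor's compatibility note, not part of the patch: the const-generic
// methods are intended as drop-in equivalents of the runtime get()/set()
// shown earlier, so for any in-range offset and width one would expect:
//
//     let mut a = __BindgenBitfieldUnit::new([0u8; 8]);
//     let mut b = __BindgenBitfieldUnit::new([0u8; 8]);
//     a.set(9, 13, 0x1abc);                 // runtime offset/width
//     b.set_const::<9usize, 13u8>(0x1abc);  // compile-time offset/width
//     assert_eq!(a.get(9, 13), b.get_const::<9usize, 13u8>());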
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } + } +} #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct WithBitfield { diff --git a/bindgen-tests/tests/expectations/tests/field-visibility-callback.rs b/bindgen-tests/tests/expectations/tests/field-visibility-callback.rs index 95b084b828..795f6bec30 100644 --- a/bindgen-tests/tests/expectations/tests/field-visibility-callback.rs +++ b/bindgen-tests/tests/expectations/tests/field-visibility-callback.rs @@ -217,6 +217,277 @@ where } } } +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | 
(byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } + } +} #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct my_struct { @@ -237,13 +508,15 @@ const _: () = { impl my_struct { #[inline] pub fn c(&self) -> ::std::os::raw::c_int { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u32) + } } #[inline] pub fn set_c(&mut self, val: ::std::os::raw::c_int) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -252,8 +525,10 @@ impl my_struct { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) - as u32, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -263,23 +538,23 @@ impl my_struct { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] fn private_d(&self) -> ::std::os::raw::c_int { - unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 1u8>() as u32) + } } #[inline] fn set_private_d(&mut 
self, val: ::std::os::raw::c_int) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(1usize, 1u8, val as u64) + self._bitfield_1.set_const::<1usize, 1u8>(val as u64) } } #[inline] @@ -288,8 +563,10 @@ impl my_struct { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 1u8) - as u32, + >>::raw_get_const::< + 1usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -299,12 +576,10 @@ impl my_struct { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -314,23 +589,21 @@ impl my_struct { ) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let c: u32 = unsafe { ::std::mem::transmute(c) }; - c as u64 - }, - ); + >({ + let c: u32 = unsafe { ::std::mem::transmute(c) }; + c as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 1u8, - { - let private_d: u32 = unsafe { ::std::mem::transmute(private_d) }; - private_d as u64 - }, - ); + >({ + let private_d: u32 = unsafe { ::std::mem::transmute(private_d) }; + private_d as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/field-visibility.rs b/bindgen-tests/tests/expectations/tests/field-visibility.rs index 420a8397f6..35645fe61a 100644 --- a/bindgen-tests/tests/expectations/tests/field-visibility.rs +++ b/bindgen-tests/tests/expectations/tests/field-visibility.rs @@ -217,6 +217,277 @@ where } } } +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | 
(byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } + } +} #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct my_struct1 { @@ -232,13 +503,15 @@ const _: () = { impl my_struct1 { #[inline] fn a(&self) -> ::std::os::raw::c_int { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u32) + } } #[inline] fn set_a(&mut self, val: ::std::os::raw::c_int) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -247,8 +520,10 @@ impl my_struct1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) - as u32, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -258,26 +533,23 @@ impl my_struct1 { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] fn new_bitfield_1(a: ::std::os::raw::c_int) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let a: 
u32 = unsafe { ::std::mem::transmute(a) }; - a as u64 - }, - ); + >({ + let a: u32 = unsafe { ::std::mem::transmute(a) }; + a as u64 + }); __bindgen_bitfield_unit } } @@ -296,13 +568,15 @@ const _: () = { impl my_struct2 { #[inline] pub fn a(&self) -> ::std::os::raw::c_int { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u32) + } } #[inline] pub fn set_a(&mut self, val: ::std::os::raw::c_int) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -311,8 +585,10 @@ impl my_struct2 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) - as u32, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -322,12 +598,10 @@ impl my_struct2 { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -336,14 +610,13 @@ impl my_struct2 { ) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let a: u32 = unsafe { ::std::mem::transmute(a) }; - a as u64 - }, - ); + >({ + let a: u32 = unsafe { ::std::mem::transmute(a) }; + a as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/incomplete-array-padding.rs b/bindgen-tests/tests/expectations/tests/incomplete-array-padding.rs index b3f8b8a06a..7e61272b36 100644 --- a/bindgen-tests/tests/expectations/tests/incomplete-array-padding.rs +++ b/bindgen-tests/tests/expectations/tests/incomplete-array-padding.rs @@ -217,6 +217,277 @@ where } } } +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | 
(byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } + } +} #[repr(C)] #[derive(Default)] pub struct __IncompleteArrayField(::std::marker::PhantomData, [T; 0]); @@ -271,13 +542,15 @@ impl Default for foo { impl foo { #[inline] pub fn a(&self) -> ::std::os::raw::c_char { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u8) + } } #[inline] pub fn set_a(&mut self, val: ::std::os::raw::c_char) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -286,7 +559,10 @@ impl foo { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) as u8, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -296,12 +572,10 @@ impl foo { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -310,14 +584,13 @@ impl foo { ) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let 
a: u8 = unsafe { ::std::mem::transmute(a) }; - a as u64 - }, - ); + >({ + let a: u8 = unsafe { ::std::mem::transmute(a) }; + a as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/issue-1034.rs b/bindgen-tests/tests/expectations/tests/issue-1034.rs index 87512f0ed1..2d8b7e23d8 100644 --- a/bindgen-tests/tests/expectations/tests/issue-1034.rs +++ b/bindgen-tests/tests/expectations/tests/issue-1034.rs @@ -217,6 +217,277 @@ where } } } +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } + } +} #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct S2 { diff --git a/bindgen-tests/tests/expectations/tests/issue-1076-unnamed-bitfield-alignment.rs b/bindgen-tests/tests/expectations/tests/issue-1076-unnamed-bitfield-alignment.rs index 59e54c9855..505b29b655 100644 --- a/bindgen-tests/tests/expectations/tests/issue-1076-unnamed-bitfield-alignment.rs +++ b/bindgen-tests/tests/expectations/tests/issue-1076-unnamed-bitfield-alignment.rs @@ -217,6 +217,277 @@ where } } } +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | 
(byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } + } +} #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct S1 { diff --git a/bindgen-tests/tests/expectations/tests/issue-1947.rs b/bindgen-tests/tests/expectations/tests/issue-1947.rs index 7ffb728c5d..cc3763057a 100644 --- a/bindgen-tests/tests/expectations/tests/issue-1947.rs +++ b/bindgen-tests/tests/expectations/tests/issue-1947.rs @@ -217,6 +217,277 @@ where } } } +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | 
(byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } + } +} pub type U8 = ::std::os::raw::c_uchar; pub type U16 = ::std::os::raw::c_ushort; #[repr(C)] @@ -240,13 +511,15 @@ const _: () = { impl V56AMDY { #[inline] pub fn MADZ(&self) -> U16 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 10u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 10u8>() as u16) + } } #[inline] pub fn set_MADZ(&mut self, val: U16) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 10u8, val as u64) + self._bitfield_1.set_const::<0usize, 10u8>(val as u64) } } #[inline] @@ -255,8 +528,10 @@ impl V56AMDY { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 10u8) - as u16, + >>::raw_get_const::< + 0usize, + 10u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -266,23 +541,23 @@ impl V56AMDY { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 10u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn MAI0(&self) -> U16 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(10usize, 2u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<10usize, 2u8>() as u16) + } } #[inline] pub fn set_MAI0(&mut self, val: U16) { unsafe { 
let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(10usize, 2u8, val as u64) + self._bitfield_1.set_const::<10usize, 2u8>(val as u64) } } #[inline] @@ -291,8 +566,10 @@ impl V56AMDY { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 10usize, 2u8) - as u16, + >>::raw_get_const::< + 10usize, + 2u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -302,23 +579,23 @@ impl V56AMDY { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 10usize, 2u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn MAI1(&self) -> U16 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(12usize, 2u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<12usize, 2u8>() as u16) + } } #[inline] pub fn set_MAI1(&mut self, val: U16) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(12usize, 2u8, val as u64) + self._bitfield_1.set_const::<12usize, 2u8>(val as u64) } } #[inline] @@ -327,8 +604,10 @@ impl V56AMDY { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 12usize, 2u8) - as u16, + >>::raw_get_const::< + 12usize, + 2u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -338,23 +617,23 @@ impl V56AMDY { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 12usize, 2u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn MAI2(&self) -> U16 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(14usize, 2u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<14usize, 2u8>() as u16) + } } #[inline] pub fn set_MAI2(&mut self, val: U16) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(14usize, 2u8, val as u64) + self._bitfield_1.set_const::<14usize, 2u8>(val as u64) } } #[inline] @@ -363,8 +642,10 @@ impl V56AMDY { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 14usize, 2u8) - as u16, + >>::raw_get_const::< + 14usize, + 2u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -374,12 +655,10 @@ impl V56AMDY { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 14usize, 2u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -391,52 +670,50 @@ impl V56AMDY { ) -> __BindgenBitfieldUnit<[u8; 2usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 2usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 10u8, - { - let MADZ: u16 = unsafe { ::std::mem::transmute(MADZ) }; - MADZ as u64 - }, - ); + >({ + let MADZ: u16 = unsafe { ::std::mem::transmute(MADZ) }; + MADZ as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 10usize, 2u8, - { - let MAI0: u16 = unsafe { ::std::mem::transmute(MAI0) }; - MAI0 as u64 - }, - ); + >({ + let MAI0: u16 = unsafe { ::std::mem::transmute(MAI0) }; + MAI0 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 12usize, 2u8, - { - let MAI1: u16 = unsafe { ::std::mem::transmute(MAI1) }; - MAI1 
as u64 - }, - ); + >({ + let MAI1: u16 = unsafe { ::std::mem::transmute(MAI1) }; + MAI1 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 14usize, 2u8, - { - let MAI2: u16 = unsafe { ::std::mem::transmute(MAI2) }; - MAI2 as u64 - }, - ); + >({ + let MAI2: u16 = unsafe { ::std::mem::transmute(MAI2) }; + MAI2 as u64 + }); __bindgen_bitfield_unit } #[inline] pub fn MATH(&self) -> U16 { - unsafe { ::std::mem::transmute(self._bitfield_2.get(0usize, 10u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_2.get_const::<0usize, 10u8>() as u16) + } } #[inline] pub fn set_MATH(&mut self, val: U16) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_2.set(0usize, 10u8, val as u64) + self._bitfield_2.set_const::<0usize, 10u8>(val as u64) } } #[inline] @@ -445,8 +722,10 @@ impl V56AMDY { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_2), 0usize, 10u8) - as u16, + >>::raw_get_const::< + 0usize, + 10u8, + >(::std::ptr::addr_of!((*this)._bitfield_2)) as u16, ) } } @@ -456,23 +735,23 @@ impl V56AMDY { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_2), + >>::raw_set_const::< 0usize, 10u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_2), val as u64) } } #[inline] pub fn MATE(&self) -> U16 { - unsafe { ::std::mem::transmute(self._bitfield_2.get(10usize, 4u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_2.get_const::<10usize, 4u8>() as u16) + } } #[inline] pub fn set_MATE(&mut self, val: U16) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_2.set(10usize, 4u8, val as u64) + self._bitfield_2.set_const::<10usize, 4u8>(val as u64) } } #[inline] @@ -481,8 +760,10 @@ impl V56AMDY { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_2), 10usize, 4u8) - as u16, + >>::raw_get_const::< + 10usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_2)) as u16, ) } } @@ -492,23 +773,23 @@ impl V56AMDY { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_2), + >>::raw_set_const::< 10usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_2), val as u64) } } #[inline] pub fn MATW(&self) -> U16 { - unsafe { ::std::mem::transmute(self._bitfield_2.get(14usize, 2u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_2.get_const::<14usize, 2u8>() as u16) + } } #[inline] pub fn set_MATW(&mut self, val: U16) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_2.set(14usize, 2u8, val as u64) + self._bitfield_2.set_const::<14usize, 2u8>(val as u64) } } #[inline] @@ -517,8 +798,10 @@ impl V56AMDY { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_2), 14usize, 2u8) - as u16, + >>::raw_get_const::< + 14usize, + 2u8, + >(::std::ptr::addr_of!((*this)._bitfield_2)) as u16, ) } } @@ -528,23 +811,23 @@ impl V56AMDY { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_2), + >>::raw_set_const::< 14usize, 2u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_2), val as u64) } } #[inline] pub fn MASW(&self) -> U8 { - unsafe { ::std::mem::transmute(self._bitfield_2.get(16usize, 4u8) as u8) } + unsafe { + 
::std::mem::transmute(self._bitfield_2.get_const::<16usize, 4u8>() as u8) + } } #[inline] pub fn set_MASW(&mut self, val: U8) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_2.set(16usize, 4u8, val as u64) + self._bitfield_2.set_const::<16usize, 4u8>(val as u64) } } #[inline] @@ -553,8 +836,10 @@ impl V56AMDY { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_2), 16usize, 4u8) - as u8, + >>::raw_get_const::< + 16usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_2)) as u8, ) } } @@ -564,23 +849,23 @@ impl V56AMDY { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_2), + >>::raw_set_const::< 16usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_2), val as u64) } } #[inline] pub fn MABW(&self) -> U8 { - unsafe { ::std::mem::transmute(self._bitfield_2.get(20usize, 3u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_2.get_const::<20usize, 3u8>() as u8) + } } #[inline] pub fn set_MABW(&mut self, val: U8) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_2.set(20usize, 3u8, val as u64) + self._bitfield_2.set_const::<20usize, 3u8>(val as u64) } } #[inline] @@ -589,8 +874,10 @@ impl V56AMDY { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_2), 20usize, 3u8) - as u8, + >>::raw_get_const::< + 20usize, + 3u8, + >(::std::ptr::addr_of!((*this)._bitfield_2)) as u8, ) } } @@ -600,23 +887,23 @@ impl V56AMDY { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_2), + >>::raw_set_const::< 20usize, 3u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_2), val as u64) } } #[inline] pub fn MAXN(&self) -> U8 { - unsafe { ::std::mem::transmute(self._bitfield_2.get(23usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_2.get_const::<23usize, 1u8>() as u8) + } } #[inline] pub fn set_MAXN(&mut self, val: U8) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_2.set(23usize, 1u8, val as u64) + self._bitfield_2.set_const::<23usize, 1u8>(val as u64) } } #[inline] @@ -625,8 +912,10 @@ impl V56AMDY { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_2), 23usize, 1u8) - as u8, + >>::raw_get_const::< + 23usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_2)) as u8, ) } } @@ -636,12 +925,10 @@ impl V56AMDY { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_2), + >>::raw_set_const::< 23usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_2), val as u64) } } #[inline] @@ -655,59 +942,53 @@ impl V56AMDY { ) -> __BindgenBitfieldUnit<[u8; 3usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 3usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 10u8, - { - let MATH: u16 = unsafe { ::std::mem::transmute(MATH) }; - MATH as u64 - }, - ); + >({ + let MATH: u16 = unsafe { ::std::mem::transmute(MATH) }; + MATH as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 10usize, 4u8, - { - let MATE: u16 = unsafe { ::std::mem::transmute(MATE) }; - MATE as u64 - }, - ); + >({ + let MATE: u16 = unsafe { ::std::mem::transmute(MATE) }; + MATE as u64 + }); 
         __bindgen_bitfield_unit
-            .set(
+            .set_const::<
                 14usize,
                 2u8,
-                {
-                    let MATW: u16 = unsafe { ::std::mem::transmute(MATW) };
-                    MATW as u64
-                },
-            );
+            >({
+                let MATW: u16 = unsafe { ::std::mem::transmute(MATW) };
+                MATW as u64
+            });
         __bindgen_bitfield_unit
-            .set(
+            .set_const::<
                 16usize,
                 4u8,
-                {
-                    let MASW: u8 = unsafe { ::std::mem::transmute(MASW) };
-                    MASW as u64
-                },
-            );
+            >({
+                let MASW: u8 = unsafe { ::std::mem::transmute(MASW) };
+                MASW as u64
+            });
         __bindgen_bitfield_unit
-            .set(
+            .set_const::<
                 20usize,
                 3u8,
-                {
-                    let MABW: u8 = unsafe { ::std::mem::transmute(MABW) };
-                    MABW as u64
-                },
-            );
+            >({
+                let MABW: u8 = unsafe { ::std::mem::transmute(MABW) };
+                MABW as u64
+            });
         __bindgen_bitfield_unit
-            .set(
+            .set_const::<
                 23usize,
                 1u8,
-                {
-                    let MAXN: u8 = unsafe { ::std::mem::transmute(MAXN) };
-                    MAXN as u64
-                },
-            );
+            >({
+                let MAXN: u8 = unsafe { ::std::mem::transmute(MAXN) };
+                MAXN as u64
+            });
         __bindgen_bitfield_unit
     }
 }
diff --git a/bindgen-tests/tests/expectations/tests/issue-739-pointer-wide-bitfield.rs b/bindgen-tests/tests/expectations/tests/issue-739-pointer-wide-bitfield.rs
index e3c5a76ea7..21d673e278 100644
--- a/bindgen-tests/tests/expectations/tests/issue-739-pointer-wide-bitfield.rs
+++ b/bindgen-tests/tests/expectations/tests/issue-739-pointer-wide-bitfield.rs
@@ -218,6 +218,277 @@ where
         }
     }
 }
+/// Const-generic methods for efficient bitfield access when offset and width
+/// are known at compile time.
+impl<const N: usize> __BindgenBitfieldUnit<[u8; N]> {
+    /// Get a field using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub const fn get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&self) -> u64 {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return 0;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = 0usize;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i].reverse_bits() as usize)
+                        << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i] as usize) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val as u64
+        } else {
+            let mut val = 0u64;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i].reverse_bits() as u64)
+                        << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i] as u64) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val
+        }
+    }
+    /// Set a field using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
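+    ///
+    /// # Examples
+    ///
+    /// A minimal illustrative round-trip; the 3-byte unit and the `<4, 10>`
+    /// offset/width pair are arbitrary examples, not output for a real header:
+    ///
+    /// ```ignore
+    /// let mut unit = __BindgenBitfieldUnit::new([0u8; 3]);
+    /// // BIT_OFFSET and BIT_WIDTH are const generics, so the byte-wise loop
+    /// // compiles down to fixed shifts and masks.
+    /// unit.set_const::<4, 10>(0x2AB);
+    /// assert_eq!(unit.get_const::<4, 10>(), 0x2AB);
+    /// ```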
+    #[inline]
+    pub fn set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&mut self, val: u64) {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = val as usize;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift;
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                if cfg!(target_endian = "big") {
+                    let byte = self.storage[start_byte + i].reverse_bits();
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    self.storage[start_byte + i] = new_byte.reverse_bits();
+                } else {
+                    self.storage[start_byte + i] = (self.storage[start_byte + i]
+                        & !byte_mask) | (byte_val & byte_mask);
+                }
+                i += 1;
+            }
+        } else {
+            let mut val = val;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 {
+                !0u64 << bit_shift
+            } else {
+                ((1u64 << BIT_WIDTH) - 1) << bit_shift
+            };
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                if cfg!(target_endian = "big") {
+                    let byte = self.storage[start_byte + i].reverse_bits();
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    self.storage[start_byte + i] = new_byte.reverse_bits();
+                } else {
+                    self.storage[start_byte + i] = (self.storage[start_byte + i]
+                        & !byte_mask) | (byte_val & byte_mask);
+                }
+                i += 1;
+            }
+        }
+    }
+    /// Raw pointer get using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
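+    ///
+    /// # Examples
+    ///
+    /// An illustrative sketch of the pattern the generated `*_raw` getters
+    /// use; the 32-byte unit and the `<64, 24>` field are arbitrary:
+    ///
+    /// ```ignore
+    /// let mut unit = __BindgenBitfieldUnit::new([0u8; 32]);
+    /// unit.set_const::<64, 24>(0xABCDEF);
+    /// // Reading through a raw pointer avoids creating a `&self` borrow.
+    /// let v = unsafe {
+    ///     <__BindgenBitfieldUnit<[u8; 32]>>::raw_get_const::<64, 24>(&unit)
+    /// };
+    /// assert_eq!(v, 0xABCDEF);
+    /// ```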
+    #[inline]
+    pub const unsafe fn raw_get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        this: *const Self,
+    ) -> u64 {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return 0;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 };
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = 0usize;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte.reverse_bits() as usize) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte as usize) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val as u64
+        } else {
+            let mut val = 0u64;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte.reverse_bits() as u64) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte as u64) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val
+        }
+    }
+    /// Raw pointer set using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
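+    ///
+    /// # Examples
+    ///
+    /// An illustrative sketch (layout arbitrary); only the bytes the field
+    /// overlaps are rewritten:
+    ///
+    /// ```ignore
+    /// let mut unit = __BindgenBitfieldUnit::new([0u8; 32]);
+    /// unsafe {
+    ///     <__BindgenBitfieldUnit<[u8; 32]>>::raw_set_const::<4, 8>(&mut unit, 0xA5);
+    /// }
+    /// assert_eq!(unit.get_const::<4, 8>(), 0xA5);
+    /// ```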
+    #[inline]
+    pub unsafe fn raw_set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        this: *mut Self,
+        val: u64,
+    ) {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        let storage_ptr = this.cast::<[u8; N]>().cast::<u8>();
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = val as usize;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift;
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                let byte_ptr = unsafe { storage_ptr.add(start_byte + i) };
+                if cfg!(target_endian = "big") {
+                    let byte = unsafe { (*byte_ptr).reverse_bits() };
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    unsafe { *byte_ptr = new_byte.reverse_bits() };
+                } else {
+                    unsafe {
+                        *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask)
+                    };
+                }
+                i += 1;
+            }
+        } else {
+            let mut val = val;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 {
+                !0u64 << bit_shift
+            } else {
+                ((1u64 << BIT_WIDTH) - 1) << bit_shift
+            };
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                let byte_ptr = unsafe { storage_ptr.add(start_byte + i) };
+                if cfg!(target_endian = "big") {
+                    let byte = unsafe { (*byte_ptr).reverse_bits() };
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    unsafe { *byte_ptr = new_byte.reverse_bits() };
+                } else {
+                    unsafe {
+                        *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask)
+                    };
+                }
+                i += 1;
+            }
+        }
+    }
+}
 #[repr(C)]
 #[derive(Debug, Default, Copy, Clone)]
 pub struct Foo {
@@ -232,13 +503,15 @@ const _: () = {
 impl Foo {
     #[inline]
     pub fn m_bitfield(&self) -> ::std::os::raw::c_ulong {
-        unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 64u8) as u64) }
+        unsafe {
+            ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 64u8>() as u64)
+        }
     }
     #[inline]
     pub fn set_m_bitfield(&mut self, val: ::std::os::raw::c_ulong) {
         unsafe {
             let val: u64 = ::std::mem::transmute(val);
-            self._bitfield_1.set(0usize, 64u8, val as u64)
+            self._bitfield_1.set_const::<0usize, 64u8>(val as u64)
         }
     }
     #[inline]
@@ -247,8 +520,10 @@ impl Foo {
             ::std::mem::transmute(
                 <__BindgenBitfieldUnit<
                     [u8; 32usize],
-                >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 64u8)
-                    as u64,
+                >>::raw_get_const::<
+                    0usize,
+                    64u8,
+                >(::std::ptr::addr_of!((*this)._bitfield_1)) as u64,
             )
         }
     }
@@ -258,23 +533,23 @@ impl Foo {
             let val: u64 = ::std::mem::transmute(val);
             <__BindgenBitfieldUnit<
                 [u8; 32usize],
-            >>::raw_set(
-                ::std::ptr::addr_of_mut!((*this)._bitfield_1),
+            >>::raw_set_const::<
                 0usize,
                 64u8,
-                val as u64,
-            )
+            >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64)
         }
     }
     #[inline]
     pub fn m_bar(&self) -> ::std::os::raw::c_ulong {
-        unsafe { ::std::mem::transmute(self._bitfield_1.get(64usize, 64u8) as u64) }
+        unsafe {
+            ::std::mem::transmute(self._bitfield_1.get_const::<64usize, 64u8>() as u64)
+        }
     }
     #[inline]
     pub fn
set_m_bar(&mut self, val: ::std::os::raw::c_ulong) { unsafe { let val: u64 = ::std::mem::transmute(val); - self._bitfield_1.set(64usize, 64u8, val as u64) + self._bitfield_1.set_const::<64usize, 64u8>(val as u64) } } #[inline] @@ -283,8 +558,10 @@ impl Foo { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 32usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 64usize, 64u8) - as u64, + >>::raw_get_const::< + 64usize, + 64u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u64, ) } } @@ -294,23 +571,23 @@ impl Foo { let val: u64 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 32usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 64usize, 64u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn foo(&self) -> ::std::os::raw::c_ulong { - unsafe { ::std::mem::transmute(self._bitfield_1.get(128usize, 1u8) as u64) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<128usize, 1u8>() as u64) + } } #[inline] pub fn set_foo(&mut self, val: ::std::os::raw::c_ulong) { unsafe { let val: u64 = ::std::mem::transmute(val); - self._bitfield_1.set(128usize, 1u8, val as u64) + self._bitfield_1.set_const::<128usize, 1u8>(val as u64) } } #[inline] @@ -319,8 +596,10 @@ impl Foo { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 32usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 128usize, 1u8) - as u64, + >>::raw_get_const::< + 128usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u64, ) } } @@ -330,23 +609,23 @@ impl Foo { let val: u64 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 32usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 128usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bar(&self) -> ::std::os::raw::c_ulong { - unsafe { ::std::mem::transmute(self._bitfield_1.get(192usize, 64u8) as u64) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<192usize, 64u8>() as u64) + } } #[inline] pub fn set_bar(&mut self, val: ::std::os::raw::c_ulong) { unsafe { let val: u64 = ::std::mem::transmute(val); - self._bitfield_1.set(192usize, 64u8, val as u64) + self._bitfield_1.set_const::<192usize, 64u8>(val as u64) } } #[inline] @@ -355,8 +634,10 @@ impl Foo { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 32usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 192usize, 64u8) - as u64, + >>::raw_get_const::< + 192usize, + 64u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u64, ) } } @@ -366,12 +647,10 @@ impl Foo { let val: u64 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 32usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 192usize, 64u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -383,41 +662,37 @@ impl Foo { ) -> __BindgenBitfieldUnit<[u8; 32usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 32usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 64u8, - { - let m_bitfield: u64 = unsafe { ::std::mem::transmute(m_bitfield) }; - m_bitfield as u64 - }, - ); + >({ + let m_bitfield: u64 = unsafe { ::std::mem::transmute(m_bitfield) }; + m_bitfield as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 64usize, 64u8, - { - let m_bar: u64 = unsafe { ::std::mem::transmute(m_bar) }; - m_bar as u64 - }, - ); + >({ + let m_bar: u64 = unsafe 
{ ::std::mem::transmute(m_bar) };
+                m_bar as u64
+            });
         __bindgen_bitfield_unit
-            .set(
+            .set_const::<
                 128usize,
                 1u8,
-                {
-                    let foo: u64 = unsafe { ::std::mem::transmute(foo) };
-                    foo as u64
-                },
-            );
+            >({
+                let foo: u64 = unsafe { ::std::mem::transmute(foo) };
+                foo as u64
+            });
         __bindgen_bitfield_unit
-            .set(
+            .set_const::<
                 192usize,
                 64u8,
-                {
-                    let bar: u64 = unsafe { ::std::mem::transmute(bar) };
-                    bar as u64
-                },
-            );
+            >({
+                let bar: u64 = unsafe { ::std::mem::transmute(bar) };
+                bar as u64
+            });
         __bindgen_bitfield_unit
     }
 }
diff --git a/bindgen-tests/tests/expectations/tests/issue-743.rs b/bindgen-tests/tests/expectations/tests/issue-743.rs
index bb8d9153a3..aa69c7ea9a 100644
--- a/bindgen-tests/tests/expectations/tests/issue-743.rs
+++ b/bindgen-tests/tests/expectations/tests/issue-743.rs
@@ -217,6 +217,277 @@ where
         }
     }
 }
+/// Const-generic methods for efficient bitfield access when offset and width
+/// are known at compile time.
+impl<const N: usize> __BindgenBitfieldUnit<[u8; N]> {
+    /// Get a field using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub const fn get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&self) -> u64 {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return 0;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = 0usize;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i].reverse_bits() as usize)
+                        << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i] as usize) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val as u64
+        } else {
+            let mut val = 0u64;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i].reverse_bits() as u64)
+                        << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i] as u64) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val
+        }
+    }
+    /// Set a field using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
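+    ///
+    /// # Examples
+    ///
+    /// An illustrative round-trip shaped like this file's 16-bit `S::u`
+    /// field; the value is arbitrary:
+    ///
+    /// ```ignore
+    /// let mut unit = __BindgenBitfieldUnit::new([0u8; 2]);
+    /// unit.set_const::<0, 16>(0xBEEF);
+    /// assert_eq!(unit.get_const::<0, 16>(), 0xBEEF);
+    /// ```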
+    #[inline]
+    pub fn set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&mut self, val: u64) {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = val as usize;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift;
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                if cfg!(target_endian = "big") {
+                    let byte = self.storage[start_byte + i].reverse_bits();
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    self.storage[start_byte + i] = new_byte.reverse_bits();
+                } else {
+                    self.storage[start_byte + i] = (self.storage[start_byte + i]
+                        & !byte_mask) | (byte_val & byte_mask);
+                }
+                i += 1;
+            }
+        } else {
+            let mut val = val;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 {
+                !0u64 << bit_shift
+            } else {
+                ((1u64 << BIT_WIDTH) - 1) << bit_shift
+            };
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                if cfg!(target_endian = "big") {
+                    let byte = self.storage[start_byte + i].reverse_bits();
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    self.storage[start_byte + i] = new_byte.reverse_bits();
+                } else {
+                    self.storage[start_byte + i] = (self.storage[start_byte + i]
+                        & !byte_mask) | (byte_val & byte_mask);
+                }
+                i += 1;
+            }
+        }
+    }
+    /// Raw pointer get using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
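+    ///
+    /// # Examples
+    ///
+    /// An illustrative sketch; reading back through a raw pointer agrees
+    /// with `get_const` (the layout mirrors this file's `S`):
+    ///
+    /// ```ignore
+    /// let mut unit = __BindgenBitfieldUnit::new([0u8; 2]);
+    /// unit.set_const::<0, 16>(0x1234);
+    /// let v = unsafe {
+    ///     <__BindgenBitfieldUnit<[u8; 2]>>::raw_get_const::<0, 16>(&unit)
+    /// };
+    /// assert_eq!(v, 0x1234);
+    /// ```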
+    #[inline]
+    pub const unsafe fn raw_get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        this: *const Self,
+    ) -> u64 {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return 0;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 };
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = 0usize;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte.reverse_bits() as usize) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte as usize) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val as u64
+        } else {
+            let mut val = 0u64;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte.reverse_bits() as u64) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte as u64) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val
+        }
+    }
+    /// Raw pointer set using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
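+    ///
+    /// # Examples
+    ///
+    /// An illustrative sketch writing a whole 16-bit field through a raw
+    /// pointer (value arbitrary):
+    ///
+    /// ```ignore
+    /// let mut unit = __BindgenBitfieldUnit::new([0u8; 2]);
+    /// unsafe {
+    ///     <__BindgenBitfieldUnit<[u8; 2]>>::raw_set_const::<0, 16>(&mut unit, 0x1234);
+    /// }
+    /// assert_eq!(unit.get_const::<0, 16>(), 0x1234);
+    /// ```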
+    #[inline]
+    pub unsafe fn raw_set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        this: *mut Self,
+        val: u64,
+    ) {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        let storage_ptr = this.cast::<[u8; N]>().cast::<u8>();
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = val as usize;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift;
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                let byte_ptr = unsafe { storage_ptr.add(start_byte + i) };
+                if cfg!(target_endian = "big") {
+                    let byte = unsafe { (*byte_ptr).reverse_bits() };
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    unsafe { *byte_ptr = new_byte.reverse_bits() };
+                } else {
+                    unsafe {
+                        *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask)
+                    };
+                }
+                i += 1;
+            }
+        } else {
+            let mut val = val;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 {
+                !0u64 << bit_shift
+            } else {
+                ((1u64 << BIT_WIDTH) - 1) << bit_shift
+            };
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                let byte_ptr = unsafe { storage_ptr.add(start_byte + i) };
+                if cfg!(target_endian = "big") {
+                    let byte = unsafe { (*byte_ptr).reverse_bits() };
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    unsafe { *byte_ptr = new_byte.reverse_bits() };
+                } else {
+                    unsafe {
+                        *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask)
+                    };
+                }
+                i += 1;
+            }
+        }
+    }
+}
 #[repr(C)]
 #[derive(Debug, Copy, Clone)]
 pub struct S {
@@ -244,13 +515,15 @@ impl Default for S {
 impl S {
     #[inline]
     pub fn u(&self) -> ::std::os::raw::c_uint {
-        unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 16u8) as u32) }
+        unsafe {
+            ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 16u8>() as u32)
+        }
    }
     #[inline]
     pub fn set_u(&mut self, val: ::std::os::raw::c_uint) {
         unsafe {
             let val: u32 = ::std::mem::transmute(val);
-            self._bitfield_1.set(0usize, 16u8, val as u64)
+            self._bitfield_1.set_const::<0usize, 16u8>(val as u64)
         }
     }
     #[inline]
@@ -259,8 +532,10 @@ impl S {
             ::std::mem::transmute(
                 <__BindgenBitfieldUnit<
                     [u8; 2usize],
-                >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 16u8)
-                    as u32,
+                >>::raw_get_const::<
+                    0usize,
+                    16u8,
+                >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32,
             )
         }
     }
@@ -270,12 +545,10 @@ impl S {
             let val: u32 = ::std::mem::transmute(val);
             <__BindgenBitfieldUnit<
                 [u8; 2usize],
-            >>::raw_set(
-                ::std::ptr::addr_of_mut!((*this)._bitfield_1),
+            >>::raw_set_const::<
                 0usize,
                 16u8,
-                val as u64,
-            )
+            >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64)
         }
     }
     #[inline]
@@ -284,14 +557,13 @@ impl S {
     ) -> __BindgenBitfieldUnit<[u8; 2usize]> {
         let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 2usize]> = Default::default();
         __bindgen_bitfield_unit
-            .set(
+            .set_const::<
                 0usize,
                 16u8,
-                {
-                    let u: u32 = unsafe { ::std::mem::transmute(u) };
-                    u as u64
-                },
-            );
+            >({
+                let u: u32 = unsafe { ::std::mem::transmute(u) };
+                u as u64
+            });
         __bindgen_bitfield_unit
     }
 }
diff --git a/bindgen-tests/tests/expectations/tests/issue-816.rs b/bindgen-tests/tests/expectations/tests/issue-816.rs
index f2fa4d77d6..a874606da7 100644
--- a/bindgen-tests/tests/expectations/tests/issue-816.rs
+++ b/bindgen-tests/tests/expectations/tests/issue-816.rs
@@ -217,6 +217,277 @@ where
         }
     }
 }
+/// Const-generic methods for efficient bitfield access when offset and width
+/// are known at compile time.
+impl<const N: usize> __BindgenBitfieldUnit<[u8; N]> {
+    /// Get a field using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub const fn get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&self) -> u64 {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return 0;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = 0usize;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i].reverse_bits() as usize)
+                        << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i] as usize) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val as u64
+        } else {
+            let mut val = 0u64;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i].reverse_bits() as u64)
+                        << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i] as u64) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val
+        }
+    }
+    /// Set a field using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
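+    ///
+    /// # Examples
+    ///
+    /// An illustrative single-bit round-trip, matching the 1-bit fields of
+    /// this file's `capabilities` (bit positions arbitrary):
+    ///
+    /// ```ignore
+    /// let mut unit = __BindgenBitfieldUnit::new([0u8; 16]);
+    /// unit.set_const::<7, 1>(1);
+    /// assert_eq!(unit.get_const::<7, 1>(), 1);
+    /// assert_eq!(unit.get_const::<6, 1>(), 0);
+    /// ```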
+    #[inline]
+    pub fn set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&mut self, val: u64) {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = val as usize;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift;
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                if cfg!(target_endian = "big") {
+                    let byte = self.storage[start_byte + i].reverse_bits();
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    self.storage[start_byte + i] = new_byte.reverse_bits();
+                } else {
+                    self.storage[start_byte + i] = (self.storage[start_byte + i]
+                        & !byte_mask) | (byte_val & byte_mask);
+                }
+                i += 1;
+            }
+        } else {
+            let mut val = val;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 {
+                !0u64 << bit_shift
+            } else {
+                ((1u64 << BIT_WIDTH) - 1) << bit_shift
+            };
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                if cfg!(target_endian = "big") {
+                    let byte = self.storage[start_byte + i].reverse_bits();
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    self.storage[start_byte + i] = new_byte.reverse_bits();
+                } else {
+                    self.storage[start_byte + i] = (self.storage[start_byte + i]
+                        & !byte_mask) | (byte_val & byte_mask);
+                }
+                i += 1;
+            }
+        }
+    }
+    /// Raw pointer get using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
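+    ///
+    /// # Examples
+    ///
+    /// An illustrative sketch reading one bit through a raw pointer
+    /// (bit position arbitrary):
+    ///
+    /// ```ignore
+    /// let mut unit = __BindgenBitfieldUnit::new([0u8; 16]);
+    /// unit.set_const::<8, 1>(1);
+    /// let bit = unsafe {
+    ///     <__BindgenBitfieldUnit<[u8; 16]>>::raw_get_const::<8, 1>(&unit)
+    /// };
+    /// assert_eq!(bit, 1);
+    /// ```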
+    #[inline]
+    pub const unsafe fn raw_get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        this: *const Self,
+    ) -> u64 {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return 0;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 };
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = 0usize;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte.reverse_bits() as usize) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte as usize) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val as u64
+        } else {
+            let mut val = 0u64;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte.reverse_bits() as u64) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte as u64) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val
+        }
+    }
+    /// Raw pointer set using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
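+    ///
+    /// # Examples
+    ///
+    /// An illustrative sketch setting one bit through a raw pointer
+    /// (bit position arbitrary):
+    ///
+    /// ```ignore
+    /// let mut unit = __BindgenBitfieldUnit::new([0u8; 16]);
+    /// unsafe {
+    ///     <__BindgenBitfieldUnit<[u8; 16]>>::raw_set_const::<15, 1>(&mut unit, 1);
+    /// }
+    /// assert_eq!(unit.get_const::<15, 1>(), 1);
+    /// ```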
+    #[inline]
+    pub unsafe fn raw_set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        this: *mut Self,
+        val: u64,
+    ) {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        let storage_ptr = this.cast::<[u8; N]>().cast::<u8>();
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = val as usize;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift;
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                let byte_ptr = unsafe { storage_ptr.add(start_byte + i) };
+                if cfg!(target_endian = "big") {
+                    let byte = unsafe { (*byte_ptr).reverse_bits() };
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    unsafe { *byte_ptr = new_byte.reverse_bits() };
+                } else {
+                    unsafe {
+                        *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask)
+                    };
+                }
+                i += 1;
+            }
+        } else {
+            let mut val = val;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 {
+                !0u64 << bit_shift
+            } else {
+                ((1u64 << BIT_WIDTH) - 1) << bit_shift
+            };
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                let byte_ptr = unsafe { storage_ptr.add(start_byte + i) };
+                if cfg!(target_endian = "big") {
+                    let byte = unsafe { (*byte_ptr).reverse_bits() };
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    unsafe { *byte_ptr = new_byte.reverse_bits() };
+                } else {
+                    unsafe {
+                        *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask)
+                    };
+                }
+                i += 1;
+            }
+        }
+    }
+}
 #[repr(C)]
 #[derive(Debug, Default, Copy, Clone)]
 pub struct capabilities {
@@ -231,13 +502,15 @@ const _: () = {
 impl capabilities {
     #[inline]
     pub fn bit_1(&self) -> ::std::os::raw::c_uint {
-        unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u32) }
+        unsafe {
+            ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u32)
+        }
    }
     #[inline]
     pub fn set_bit_1(&mut self, val: ::std::os::raw::c_uint) {
         unsafe {
             let val: u32 = ::std::mem::transmute(val);
-            self._bitfield_1.set(0usize, 1u8, val as u64)
+            self._bitfield_1.set_const::<0usize, 1u8>(val as u64)
         }
     }
     #[inline]
@@ -246,8 +519,10 @@ impl capabilities {
             ::std::mem::transmute(
                 <__BindgenBitfieldUnit<
                     [u8; 16usize],
-                >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8)
-                    as u32,
+                >>::raw_get_const::<
+                    0usize,
+                    1u8,
+                >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32,
             )
         }
    }
@@ -257,23 +532,23 @@ impl capabilities {
             let val: u32 = ::std::mem::transmute(val);
             <__BindgenBitfieldUnit<
                 [u8; 16usize],
-            >>::raw_set(
-                ::std::ptr::addr_of_mut!((*this)._bitfield_1),
+            >>::raw_set_const::<
                 0usize,
                 1u8,
-                val as u64,
-            )
+            >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64)
         }
     }
     #[inline]
     pub fn bit_2(&self) -> ::std::os::raw::c_uint {
-        unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u32) }
+        unsafe {
+            ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 1u8>() as u32)
+        }
     }
#[inline] pub fn set_bit_2(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(1usize, 1u8, val as u64) + self._bitfield_1.set_const::<1usize, 1u8>(val as u64) } } #[inline] @@ -282,8 +557,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 1u8) - as u32, + >>::raw_get_const::< + 1usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -293,23 +570,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_3(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<2usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_3(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(2usize, 1u8, val as u64) + self._bitfield_1.set_const::<2usize, 1u8>(val as u64) } } #[inline] @@ -318,8 +595,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 2usize, 1u8) - as u32, + >>::raw_get_const::< + 2usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -329,23 +608,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 2usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_4(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(3usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<3usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_4(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(3usize, 1u8, val as u64) + self._bitfield_1.set_const::<3usize, 1u8>(val as u64) } } #[inline] @@ -354,8 +633,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 3usize, 1u8) - as u32, + >>::raw_get_const::< + 3usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -365,23 +646,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 3usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_5(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(4usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<4usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_5(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(4usize, 1u8, val as u64) + self._bitfield_1.set_const::<4usize, 1u8>(val as u64) } } #[inline] @@ -390,8 +671,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - 
>>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 4usize, 1u8) - as u32, + >>::raw_get_const::< + 4usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -401,23 +684,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 4usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_6(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(5usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<5usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_6(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(5usize, 1u8, val as u64) + self._bitfield_1.set_const::<5usize, 1u8>(val as u64) } } #[inline] @@ -426,8 +709,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 5usize, 1u8) - as u32, + >>::raw_get_const::< + 5usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -437,23 +722,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 5usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_7(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(6usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<6usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_7(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(6usize, 1u8, val as u64) + self._bitfield_1.set_const::<6usize, 1u8>(val as u64) } } #[inline] @@ -462,8 +747,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 6usize, 1u8) - as u32, + >>::raw_get_const::< + 6usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -473,23 +760,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 6usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_8(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(7usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<7usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_8(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(7usize, 1u8, val as u64) + self._bitfield_1.set_const::<7usize, 1u8>(val as u64) } } #[inline] @@ -498,8 +785,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 7usize, 1u8) - as u32, + >>::raw_get_const::< + 7usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -509,23 +798,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + 
>>::raw_set_const::< 7usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_9(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(8usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<8usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_9(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(8usize, 1u8, val as u64) + self._bitfield_1.set_const::<8usize, 1u8>(val as u64) } } #[inline] @@ -534,8 +823,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 8usize, 1u8) - as u32, + >>::raw_get_const::< + 8usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -545,23 +836,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 8usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_10(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(9usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<9usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_10(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(9usize, 1u8, val as u64) + self._bitfield_1.set_const::<9usize, 1u8>(val as u64) } } #[inline] @@ -570,8 +861,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 9usize, 1u8) - as u32, + >>::raw_get_const::< + 9usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -581,23 +874,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 9usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_11(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(10usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<10usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_11(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(10usize, 1u8, val as u64) + self._bitfield_1.set_const::<10usize, 1u8>(val as u64) } } #[inline] @@ -606,8 +899,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 10usize, 1u8) - as u32, + >>::raw_get_const::< + 10usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -617,23 +912,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 10usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_12(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(11usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<11usize, 1u8>() as u32) + } } #[inline] pub 
fn set_bit_12(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(11usize, 1u8, val as u64) + self._bitfield_1.set_const::<11usize, 1u8>(val as u64) } } #[inline] @@ -642,8 +937,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 11usize, 1u8) - as u32, + >>::raw_get_const::< + 11usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -653,23 +950,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 11usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_13(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(12usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<12usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_13(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(12usize, 1u8, val as u64) + self._bitfield_1.set_const::<12usize, 1u8>(val as u64) } } #[inline] @@ -678,8 +975,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 12usize, 1u8) - as u32, + >>::raw_get_const::< + 12usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -689,23 +988,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 12usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_14(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(13usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<13usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_14(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(13usize, 1u8, val as u64) + self._bitfield_1.set_const::<13usize, 1u8>(val as u64) } } #[inline] @@ -714,8 +1013,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 13usize, 1u8) - as u32, + >>::raw_get_const::< + 13usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -725,23 +1026,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 13usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_15(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(14usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<14usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_15(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(14usize, 1u8, val as u64) + self._bitfield_1.set_const::<14usize, 1u8>(val as u64) } } #[inline] @@ -750,8 +1051,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - 
>>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 14usize, 1u8) - as u32, + >>::raw_get_const::< + 14usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -761,23 +1064,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 14usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_16(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(15usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<15usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_16(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(15usize, 1u8, val as u64) + self._bitfield_1.set_const::<15usize, 1u8>(val as u64) } } #[inline] @@ -786,8 +1089,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 15usize, 1u8) - as u32, + >>::raw_get_const::< + 15usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -797,23 +1102,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 15usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_17(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(16usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<16usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_17(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(16usize, 1u8, val as u64) + self._bitfield_1.set_const::<16usize, 1u8>(val as u64) } } #[inline] @@ -822,8 +1127,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 16usize, 1u8) - as u32, + >>::raw_get_const::< + 16usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -833,23 +1140,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 16usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_18(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(17usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<17usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_18(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(17usize, 1u8, val as u64) + self._bitfield_1.set_const::<17usize, 1u8>(val as u64) } } #[inline] @@ -858,8 +1165,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 17usize, 1u8) - as u32, + >>::raw_get_const::< + 17usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -869,23 +1178,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - 
::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 17usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_19(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(18usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<18usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_19(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(18usize, 1u8, val as u64) + self._bitfield_1.set_const::<18usize, 1u8>(val as u64) } } #[inline] @@ -894,8 +1203,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 18usize, 1u8) - as u32, + >>::raw_get_const::< + 18usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -905,23 +1216,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 18usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_20(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(19usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<19usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_20(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(19usize, 1u8, val as u64) + self._bitfield_1.set_const::<19usize, 1u8>(val as u64) } } #[inline] @@ -930,8 +1241,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 19usize, 1u8) - as u32, + >>::raw_get_const::< + 19usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -941,23 +1254,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 19usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_21(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(20usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<20usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_21(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(20usize, 1u8, val as u64) + self._bitfield_1.set_const::<20usize, 1u8>(val as u64) } } #[inline] @@ -966,8 +1279,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 20usize, 1u8) - as u32, + >>::raw_get_const::< + 20usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -977,23 +1292,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 20usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_22(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(21usize, 1u8) as u32) } + unsafe { + 
::std::mem::transmute(self._bitfield_1.get_const::<21usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_22(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(21usize, 1u8, val as u64) + self._bitfield_1.set_const::<21usize, 1u8>(val as u64) } } #[inline] @@ -1002,8 +1317,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 21usize, 1u8) - as u32, + >>::raw_get_const::< + 21usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1013,23 +1330,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 21usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_23(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(22usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<22usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_23(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(22usize, 1u8, val as u64) + self._bitfield_1.set_const::<22usize, 1u8>(val as u64) } } #[inline] @@ -1038,8 +1355,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 22usize, 1u8) - as u32, + >>::raw_get_const::< + 22usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1049,23 +1368,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 22usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_24(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(23usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<23usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_24(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(23usize, 1u8, val as u64) + self._bitfield_1.set_const::<23usize, 1u8>(val as u64) } } #[inline] @@ -1074,8 +1393,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 23usize, 1u8) - as u32, + >>::raw_get_const::< + 23usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1085,23 +1406,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 23usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_25(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(24usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<24usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_25(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(24usize, 1u8, val as u64) + self._bitfield_1.set_const::<24usize, 1u8>(val as u64) } } #[inline] @@ -1110,8 
+1431,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 24usize, 1u8) - as u32, + >>::raw_get_const::< + 24usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1121,23 +1444,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 24usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_26(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(25usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<25usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_26(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(25usize, 1u8, val as u64) + self._bitfield_1.set_const::<25usize, 1u8>(val as u64) } } #[inline] @@ -1146,8 +1469,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 25usize, 1u8) - as u32, + >>::raw_get_const::< + 25usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1157,23 +1482,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 25usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_27(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(26usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<26usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_27(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(26usize, 1u8, val as u64) + self._bitfield_1.set_const::<26usize, 1u8>(val as u64) } } #[inline] @@ -1182,8 +1507,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 26usize, 1u8) - as u32, + >>::raw_get_const::< + 26usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1193,23 +1520,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 26usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_28(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(27usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<27usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_28(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(27usize, 1u8, val as u64) + self._bitfield_1.set_const::<27usize, 1u8>(val as u64) } } #[inline] @@ -1218,8 +1545,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 27usize, 1u8) - as u32, + >>::raw_get_const::< + 27usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1229,23 +1558,23 @@ impl capabilities { let val: u32 = 
::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 27usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_29(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(28usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<28usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_29(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(28usize, 1u8, val as u64) + self._bitfield_1.set_const::<28usize, 1u8>(val as u64) } } #[inline] @@ -1254,8 +1583,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 28usize, 1u8) - as u32, + >>::raw_get_const::< + 28usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1265,23 +1596,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 28usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_30(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(29usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<29usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_30(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(29usize, 1u8, val as u64) + self._bitfield_1.set_const::<29usize, 1u8>(val as u64) } } #[inline] @@ -1290,8 +1621,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 29usize, 1u8) - as u32, + >>::raw_get_const::< + 29usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1301,23 +1634,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 29usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_31(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(30usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<30usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_31(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(30usize, 1u8, val as u64) + self._bitfield_1.set_const::<30usize, 1u8>(val as u64) } } #[inline] @@ -1326,8 +1659,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 30usize, 1u8) - as u32, + >>::raw_get_const::< + 30usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1337,23 +1672,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 30usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_32(&self) -> ::std::os::raw::c_uint { - unsafe { 
::std::mem::transmute(self._bitfield_1.get(31usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<31usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_32(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(31usize, 1u8, val as u64) + self._bitfield_1.set_const::<31usize, 1u8>(val as u64) } } #[inline] @@ -1362,8 +1697,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 31usize, 1u8) - as u32, + >>::raw_get_const::< + 31usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1373,23 +1710,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 31usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_33(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(32usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<32usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_33(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(32usize, 1u8, val as u64) + self._bitfield_1.set_const::<32usize, 1u8>(val as u64) } } #[inline] @@ -1398,8 +1735,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 32usize, 1u8) - as u32, + >>::raw_get_const::< + 32usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1409,23 +1748,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 32usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_34(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(33usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<33usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_34(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(33usize, 1u8, val as u64) + self._bitfield_1.set_const::<33usize, 1u8>(val as u64) } } #[inline] @@ -1434,8 +1773,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 33usize, 1u8) - as u32, + >>::raw_get_const::< + 33usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1445,23 +1786,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 33usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_35(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(34usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<34usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_35(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(34usize, 1u8, val as u64) + 
self._bitfield_1.set_const::<34usize, 1u8>(val as u64) } } #[inline] @@ -1470,8 +1811,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 34usize, 1u8) - as u32, + >>::raw_get_const::< + 34usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1481,23 +1824,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 34usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_36(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(35usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<35usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_36(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(35usize, 1u8, val as u64) + self._bitfield_1.set_const::<35usize, 1u8>(val as u64) } } #[inline] @@ -1506,8 +1849,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 35usize, 1u8) - as u32, + >>::raw_get_const::< + 35usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1517,23 +1862,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 35usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_37(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(36usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<36usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_37(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(36usize, 1u8, val as u64) + self._bitfield_1.set_const::<36usize, 1u8>(val as u64) } } #[inline] @@ -1542,8 +1887,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 36usize, 1u8) - as u32, + >>::raw_get_const::< + 36usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1553,23 +1900,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 36usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_38(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(37usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<37usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_38(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(37usize, 1u8, val as u64) + self._bitfield_1.set_const::<37usize, 1u8>(val as u64) } } #[inline] @@ -1578,8 +1925,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 37usize, 1u8) - as u32, + >>::raw_get_const::< + 37usize, + 1u8, + 
>(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1589,23 +1938,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 37usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_39(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(38usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<38usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_39(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(38usize, 1u8, val as u64) + self._bitfield_1.set_const::<38usize, 1u8>(val as u64) } } #[inline] @@ -1614,8 +1963,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 38usize, 1u8) - as u32, + >>::raw_get_const::< + 38usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1625,23 +1976,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 38usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_40(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(39usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<39usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_40(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(39usize, 1u8, val as u64) + self._bitfield_1.set_const::<39usize, 1u8>(val as u64) } } #[inline] @@ -1650,8 +2001,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 39usize, 1u8) - as u32, + >>::raw_get_const::< + 39usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1661,23 +2014,23 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 39usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bit_41(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(40usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<40usize, 1u8>() as u32) + } } #[inline] pub fn set_bit_41(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(40usize, 1u8, val as u64) + self._bitfield_1.set_const::<40usize, 1u8>(val as u64) } } #[inline] @@ -1686,8 +2039,10 @@ impl capabilities { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 40usize, 1u8) - as u32, + >>::raw_get_const::< + 40usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -1697,12 +2052,10 @@ impl capabilities { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 16usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 40usize, 1u8, - val as u64, - ) + 
>(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -1751,374 +2104,333 @@ impl capabilities { ) -> __BindgenBitfieldUnit<[u8; 16usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 16usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let bit_1: u32 = unsafe { ::std::mem::transmute(bit_1) }; - bit_1 as u64 - }, - ); + >({ + let bit_1: u32 = unsafe { ::std::mem::transmute(bit_1) }; + bit_1 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 1u8, - { - let bit_2: u32 = unsafe { ::std::mem::transmute(bit_2) }; - bit_2 as u64 - }, - ); + >({ + let bit_2: u32 = unsafe { ::std::mem::transmute(bit_2) }; + bit_2 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 2usize, 1u8, - { - let bit_3: u32 = unsafe { ::std::mem::transmute(bit_3) }; - bit_3 as u64 - }, - ); + >({ + let bit_3: u32 = unsafe { ::std::mem::transmute(bit_3) }; + bit_3 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 3usize, 1u8, - { - let bit_4: u32 = unsafe { ::std::mem::transmute(bit_4) }; - bit_4 as u64 - }, - ); + >({ + let bit_4: u32 = unsafe { ::std::mem::transmute(bit_4) }; + bit_4 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 4usize, 1u8, - { - let bit_5: u32 = unsafe { ::std::mem::transmute(bit_5) }; - bit_5 as u64 - }, - ); + >({ + let bit_5: u32 = unsafe { ::std::mem::transmute(bit_5) }; + bit_5 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 5usize, 1u8, - { - let bit_6: u32 = unsafe { ::std::mem::transmute(bit_6) }; - bit_6 as u64 - }, - ); + >({ + let bit_6: u32 = unsafe { ::std::mem::transmute(bit_6) }; + bit_6 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 6usize, 1u8, - { - let bit_7: u32 = unsafe { ::std::mem::transmute(bit_7) }; - bit_7 as u64 - }, - ); + >({ + let bit_7: u32 = unsafe { ::std::mem::transmute(bit_7) }; + bit_7 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 7usize, 1u8, - { - let bit_8: u32 = unsafe { ::std::mem::transmute(bit_8) }; - bit_8 as u64 - }, - ); + >({ + let bit_8: u32 = unsafe { ::std::mem::transmute(bit_8) }; + bit_8 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 8usize, 1u8, - { - let bit_9: u32 = unsafe { ::std::mem::transmute(bit_9) }; - bit_9 as u64 - }, - ); + >({ + let bit_9: u32 = unsafe { ::std::mem::transmute(bit_9) }; + bit_9 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 9usize, 1u8, - { - let bit_10: u32 = unsafe { ::std::mem::transmute(bit_10) }; - bit_10 as u64 - }, - ); + >({ + let bit_10: u32 = unsafe { ::std::mem::transmute(bit_10) }; + bit_10 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 10usize, 1u8, - { - let bit_11: u32 = unsafe { ::std::mem::transmute(bit_11) }; - bit_11 as u64 - }, - ); + >({ + let bit_11: u32 = unsafe { ::std::mem::transmute(bit_11) }; + bit_11 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 11usize, 1u8, - { - let bit_12: u32 = unsafe { ::std::mem::transmute(bit_12) }; - bit_12 as u64 - }, - ); + >({ + let bit_12: u32 = unsafe { ::std::mem::transmute(bit_12) }; + bit_12 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 12usize, 1u8, - { - let bit_13: u32 = unsafe { ::std::mem::transmute(bit_13) }; - bit_13 as u64 - }, - ); + >({ + let bit_13: u32 = unsafe { ::std::mem::transmute(bit_13) }; + bit_13 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 13usize, 1u8, - { - let bit_14: u32 = unsafe { ::std::mem::transmute(bit_14) }; - bit_14 as u64 - }, - ); + >({ + let bit_14: u32 = unsafe { 
::std::mem::transmute(bit_14) }; + bit_14 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 14usize, 1u8, - { - let bit_15: u32 = unsafe { ::std::mem::transmute(bit_15) }; - bit_15 as u64 - }, - ); + >({ + let bit_15: u32 = unsafe { ::std::mem::transmute(bit_15) }; + bit_15 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 15usize, 1u8, - { - let bit_16: u32 = unsafe { ::std::mem::transmute(bit_16) }; - bit_16 as u64 - }, - ); + >({ + let bit_16: u32 = unsafe { ::std::mem::transmute(bit_16) }; + bit_16 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 16usize, 1u8, - { - let bit_17: u32 = unsafe { ::std::mem::transmute(bit_17) }; - bit_17 as u64 - }, - ); + >({ + let bit_17: u32 = unsafe { ::std::mem::transmute(bit_17) }; + bit_17 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 17usize, 1u8, - { - let bit_18: u32 = unsafe { ::std::mem::transmute(bit_18) }; - bit_18 as u64 - }, - ); + >({ + let bit_18: u32 = unsafe { ::std::mem::transmute(bit_18) }; + bit_18 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 18usize, 1u8, - { - let bit_19: u32 = unsafe { ::std::mem::transmute(bit_19) }; - bit_19 as u64 - }, - ); + >({ + let bit_19: u32 = unsafe { ::std::mem::transmute(bit_19) }; + bit_19 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 19usize, 1u8, - { - let bit_20: u32 = unsafe { ::std::mem::transmute(bit_20) }; - bit_20 as u64 - }, - ); + >({ + let bit_20: u32 = unsafe { ::std::mem::transmute(bit_20) }; + bit_20 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 20usize, 1u8, - { - let bit_21: u32 = unsafe { ::std::mem::transmute(bit_21) }; - bit_21 as u64 - }, - ); + >({ + let bit_21: u32 = unsafe { ::std::mem::transmute(bit_21) }; + bit_21 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 21usize, 1u8, - { - let bit_22: u32 = unsafe { ::std::mem::transmute(bit_22) }; - bit_22 as u64 - }, - ); + >({ + let bit_22: u32 = unsafe { ::std::mem::transmute(bit_22) }; + bit_22 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 22usize, 1u8, - { - let bit_23: u32 = unsafe { ::std::mem::transmute(bit_23) }; - bit_23 as u64 - }, - ); + >({ + let bit_23: u32 = unsafe { ::std::mem::transmute(bit_23) }; + bit_23 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 23usize, 1u8, - { - let bit_24: u32 = unsafe { ::std::mem::transmute(bit_24) }; - bit_24 as u64 - }, - ); + >({ + let bit_24: u32 = unsafe { ::std::mem::transmute(bit_24) }; + bit_24 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 24usize, 1u8, - { - let bit_25: u32 = unsafe { ::std::mem::transmute(bit_25) }; - bit_25 as u64 - }, - ); + >({ + let bit_25: u32 = unsafe { ::std::mem::transmute(bit_25) }; + bit_25 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 25usize, 1u8, - { - let bit_26: u32 = unsafe { ::std::mem::transmute(bit_26) }; - bit_26 as u64 - }, - ); + >({ + let bit_26: u32 = unsafe { ::std::mem::transmute(bit_26) }; + bit_26 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 26usize, 1u8, - { - let bit_27: u32 = unsafe { ::std::mem::transmute(bit_27) }; - bit_27 as u64 - }, - ); + >({ + let bit_27: u32 = unsafe { ::std::mem::transmute(bit_27) }; + bit_27 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 27usize, 1u8, - { - let bit_28: u32 = unsafe { ::std::mem::transmute(bit_28) }; - bit_28 as u64 - }, - ); + >({ + let bit_28: u32 = unsafe { ::std::mem::transmute(bit_28) }; + bit_28 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 28usize, 1u8, - { - let bit_29: u32 
= unsafe { ::std::mem::transmute(bit_29) }; - bit_29 as u64 - }, - ); + >({ + let bit_29: u32 = unsafe { ::std::mem::transmute(bit_29) }; + bit_29 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 29usize, 1u8, - { - let bit_30: u32 = unsafe { ::std::mem::transmute(bit_30) }; - bit_30 as u64 - }, - ); + >({ + let bit_30: u32 = unsafe { ::std::mem::transmute(bit_30) }; + bit_30 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 30usize, 1u8, - { - let bit_31: u32 = unsafe { ::std::mem::transmute(bit_31) }; - bit_31 as u64 - }, - ); + >({ + let bit_31: u32 = unsafe { ::std::mem::transmute(bit_31) }; + bit_31 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 31usize, 1u8, - { - let bit_32: u32 = unsafe { ::std::mem::transmute(bit_32) }; - bit_32 as u64 - }, - ); + >({ + let bit_32: u32 = unsafe { ::std::mem::transmute(bit_32) }; + bit_32 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 32usize, 1u8, - { - let bit_33: u32 = unsafe { ::std::mem::transmute(bit_33) }; - bit_33 as u64 - }, - ); + >({ + let bit_33: u32 = unsafe { ::std::mem::transmute(bit_33) }; + bit_33 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 33usize, 1u8, - { - let bit_34: u32 = unsafe { ::std::mem::transmute(bit_34) }; - bit_34 as u64 - }, - ); + >({ + let bit_34: u32 = unsafe { ::std::mem::transmute(bit_34) }; + bit_34 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 34usize, 1u8, - { - let bit_35: u32 = unsafe { ::std::mem::transmute(bit_35) }; - bit_35 as u64 - }, - ); + >({ + let bit_35: u32 = unsafe { ::std::mem::transmute(bit_35) }; + bit_35 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 35usize, 1u8, - { - let bit_36: u32 = unsafe { ::std::mem::transmute(bit_36) }; - bit_36 as u64 - }, - ); + >({ + let bit_36: u32 = unsafe { ::std::mem::transmute(bit_36) }; + bit_36 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 36usize, 1u8, - { - let bit_37: u32 = unsafe { ::std::mem::transmute(bit_37) }; - bit_37 as u64 - }, - ); + >({ + let bit_37: u32 = unsafe { ::std::mem::transmute(bit_37) }; + bit_37 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 37usize, 1u8, - { - let bit_38: u32 = unsafe { ::std::mem::transmute(bit_38) }; - bit_38 as u64 - }, - ); + >({ + let bit_38: u32 = unsafe { ::std::mem::transmute(bit_38) }; + bit_38 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 38usize, 1u8, - { - let bit_39: u32 = unsafe { ::std::mem::transmute(bit_39) }; - bit_39 as u64 - }, - ); + >({ + let bit_39: u32 = unsafe { ::std::mem::transmute(bit_39) }; + bit_39 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 39usize, 1u8, - { - let bit_40: u32 = unsafe { ::std::mem::transmute(bit_40) }; - bit_40 as u64 - }, - ); + >({ + let bit_40: u32 = unsafe { ::std::mem::transmute(bit_40) }; + bit_40 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 40usize, 1u8, - { - let bit_41: u32 = unsafe { ::std::mem::transmute(bit_41) }; - bit_41 as u64 - }, - ); + >({ + let bit_41: u32 = unsafe { ::std::mem::transmute(bit_41) }; + bit_41 as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/jsval_layout_opaque.rs b/bindgen-tests/tests/expectations/tests/jsval_layout_opaque.rs index 82e8f2b4b3..041744a2c1 100644 --- a/bindgen-tests/tests/expectations/tests/jsval_layout_opaque.rs +++ b/bindgen-tests/tests/expectations/tests/jsval_layout_opaque.rs @@ -217,6 +217,277 @@ where } } } +/// Const-generic methods for efficient bitfield access when offset and width +/// are known 
at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
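+    // Worked example of the byte-wise merge performed by `set_const` below
+    // (illustrative sketch, not emitted by bindgen): on a little-endian
+    // target, `set_const::<3usize, 7u8>(0x55)` masks the value to 7 bits,
+    // shifts it into place (0x55 << 3 = 0x2A8), builds
+    // field_mask = 0x7F << 3 = 0x3F8, and then merges one byte at a time:
+    //     storage[0] = (storage[0] & !0xF8) | 0xA8;
+    //     storage[1] = (storage[1] & !0x03) | 0x02;
+    // Bits outside the field in the two touched bytes are preserved.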
+ #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
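+    // Minimal usage sketch of the safe const-generic accessors (hypothetical
+    // example; the generated field accessors below are the real callers):
+    //     let mut unit: __BindgenBitfieldUnit<[u8; 2usize]> = Default::default();
+    //     unit.set_const::<3usize, 7u8>(0x55);
+    //     assert_eq!(unit.get_const::<3usize, 7u8>(), 0x55);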
+ #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
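+    // Note on the 64-bit field_mask used by the set paths: when
+    // BIT_WIDTH as usize + bit_shift >= 64, the expression
+    // `((1u64 << BIT_WIDTH) - 1) << bit_shift` would overflow the shift, so
+    // the mask falls back to `!0u64 << bit_shift`; e.g. a 64-bit field at
+    // bit offset 0 uses field_mask = !0u64.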
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } + } +} pub const JSVAL_TAG_SHIFT: u32 = 47; pub const JSVAL_PAYLOAD_MASK: u64 = 140737488355327; pub const JSVAL_TAG_MASK: i64 = -140737488355328; @@ -341,13 +612,15 @@ impl Default for jsval_layout__bindgen_ty_1 { impl jsval_layout__bindgen_ty_1 { #[inline] pub fn payload47(&self) -> u64 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 47u8) as u64) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 47u8>() as u64) + } } #[inline] pub fn set_payload47(&mut self, val: u64) { unsafe { let val: u64 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 47u8, val as u64) + self._bitfield_1.set_const::<0usize, 47u8>(val as u64) } } #[inline] @@ -356,8 +629,10 @@ impl jsval_layout__bindgen_ty_1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 8usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 47u8) - as u64, + >>::raw_get_const::< + 0usize, + 47u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u64, ) } } @@ -367,23 +642,23 @@ impl jsval_layout__bindgen_ty_1 { let val: u64 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 8usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 47u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn tag(&self) -> JSValueTag { - unsafe { ::std::mem::transmute(self._bitfield_1.get(47usize, 17u8) as u32) } 
+ unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<47usize, 17u8>() as u32) + } } #[inline] pub fn set_tag(&mut self, val: JSValueTag) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(47usize, 17u8, val as u64) + self._bitfield_1.set_const::<47usize, 17u8>(val as u64) } } #[inline] @@ -392,8 +667,10 @@ impl jsval_layout__bindgen_ty_1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 8usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 47usize, 17u8) - as u32, + >>::raw_get_const::< + 47usize, + 17u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -403,12 +680,10 @@ impl jsval_layout__bindgen_ty_1 { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 8usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 47usize, 17u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -418,23 +693,21 @@ impl jsval_layout__bindgen_ty_1 { ) -> __BindgenBitfieldUnit<[u8; 8usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 47u8, - { - let payload47: u64 = unsafe { ::std::mem::transmute(payload47) }; - payload47 as u64 - }, - ); + >({ + let payload47: u64 = unsafe { ::std::mem::transmute(payload47) }; + payload47 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 47usize, 17u8, - { - let tag: u32 = unsafe { ::std::mem::transmute(tag) }; - tag as u64 - }, - ); + >({ + let tag: u32 = unsafe { ::std::mem::transmute(tag) }; + tag as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/layout_align.rs b/bindgen-tests/tests/expectations/tests/layout_align.rs index c144d98450..6439b78a3a 100644 --- a/bindgen-tests/tests/expectations/tests/layout_align.rs +++ b/bindgen-tests/tests/expectations/tests/layout_align.rs @@ -217,6 +217,277 @@ where } } } +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
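+    // Big-endian note for the implementation below: `reverse_bits()`
+    // re-numbers the bits of each byte so the little-endian
+    // gather/shift/mask arithmetic applies unchanged; the final
+    // `val.reverse_bits() >> (W - BIT_WIDTH)`, where W is the accumulator
+    // width (usize::BITS or 64), restores the extracted value's bit order.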
+ #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | 
(byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
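+    // The raw_* entry points take `this: *const Self` / `this: *mut Self` so
+    // that the generated `*_raw` accessors can work through
+    // `::std::ptr::addr_of!(...)` / `addr_of_mut!(...)` pointers without
+    // ever creating a reference to the bitfield storage.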
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } + } +} #[repr(C)] #[derive(Default)] pub struct __IncompleteArrayField(::std::marker::PhantomData, [T; 0]); @@ -310,13 +581,15 @@ const _: () = { impl rte_eth_link { #[inline] pub fn link_duplex(&self) -> u16 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u16) + } } #[inline] pub fn set_link_duplex(&mut self, val: u16) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -325,8 +598,10 @@ impl rte_eth_link { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) - as u16, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -336,23 +611,23 @@ impl rte_eth_link { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn link_autoneg(&self) -> u16 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 1u8>() as u16) + } } #[inline] pub fn 
set_link_autoneg(&mut self, val: u16) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(1usize, 1u8, val as u64) + self._bitfield_1.set_const::<1usize, 1u8>(val as u64) } } #[inline] @@ -361,8 +636,10 @@ impl rte_eth_link { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 1u8) - as u16, + >>::raw_get_const::< + 1usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -372,23 +649,23 @@ impl rte_eth_link { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn link_status(&self) -> u16 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<2usize, 1u8>() as u16) + } } #[inline] pub fn set_link_status(&mut self, val: u16) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(2usize, 1u8, val as u64) + self._bitfield_1.set_const::<2usize, 1u8>(val as u64) } } #[inline] @@ -397,8 +674,10 @@ impl rte_eth_link { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 2usize, 1u8) - as u16, + >>::raw_get_const::< + 2usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -408,12 +687,10 @@ impl rte_eth_link { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 2usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -424,34 +701,29 @@ impl rte_eth_link { ) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let link_duplex: u16 = unsafe { ::std::mem::transmute(link_duplex) }; - link_duplex as u64 - }, - ); + >({ + let link_duplex: u16 = unsafe { ::std::mem::transmute(link_duplex) }; + link_duplex as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 1u8, - { - let link_autoneg: u16 = unsafe { - ::std::mem::transmute(link_autoneg) - }; - link_autoneg as u64 - }, - ); + >({ + let link_autoneg: u16 = unsafe { ::std::mem::transmute(link_autoneg) }; + link_autoneg as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 2usize, 1u8, - { - let link_status: u16 = unsafe { ::std::mem::transmute(link_status) }; - link_status as u64 - }, - ); + >({ + let link_status: u16 = unsafe { ::std::mem::transmute(link_status) }; + link_status as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/layout_eth_conf.rs b/bindgen-tests/tests/expectations/tests/layout_eth_conf.rs index 55127f0610..ebc22a6398 100644 --- a/bindgen-tests/tests/expectations/tests/layout_eth_conf.rs +++ b/bindgen-tests/tests/expectations/tests/layout_eth_conf.rs @@ -217,6 +217,277 @@ where } } } +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
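+    // Fast-path note for `get_const` below: when
+    // BIT_WIDTH as usize + bit_shift <= usize::BITS, all arithmetic stays in
+    // a native word; e.g. on a 32-bit target a 29-bit field with
+    // bit_shift = 3 (29 + 3 = 32) still takes the usize path, while anything
+    // wider falls back to the u64 path.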
+ #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | 
(byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
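+    // Hypothetical round trip through the raw entry points (illustrative
+    // only, not part of the generated bindings):
+    //     let mut unit: __BindgenBitfieldUnit<[u8; 2usize]> = Default::default();
+    //     unsafe {
+    //         <__BindgenBitfieldUnit<[u8; 2usize]>>
+    //             ::raw_set_const::<3usize, 7u8>(&mut unit, 0x55);
+    //         assert_eq!(
+    //             <__BindgenBitfieldUnit<[u8; 2usize]>>
+    //                 ::raw_get_const::<3usize, 7u8>(&unit),
+    //             0x55,
+    //         );
+    //     }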
+    #[inline]
+    pub unsafe fn raw_set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        this: *mut Self,
+        val: u64,
+    ) {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        let storage_ptr = this.cast::<[u8; N]>().cast::<u8>();
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = val as usize;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift;
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                let byte_ptr = unsafe { storage_ptr.add(start_byte + i) };
+                if cfg!(target_endian = "big") {
+                    let byte = unsafe { (*byte_ptr).reverse_bits() };
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    unsafe { *byte_ptr = new_byte.reverse_bits() };
+                } else {
+                    unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) };
+                }
+                i += 1;
+            }
+        } else {
+            let mut val = val;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 {
+                !0u64 << bit_shift
+            } else {
+                ((1u64 << BIT_WIDTH) - 1) << bit_shift
+            };
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                let byte_ptr = unsafe { storage_ptr.add(start_byte + i) };
+                if cfg!(target_endian = "big") {
+                    let byte = unsafe { (*byte_ptr).reverse_bits() };
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    unsafe { *byte_ptr = new_byte.reverse_bits() };
+                } else {
+                    unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) };
+                }
+                i += 1;
+            }
+        }
+    }
+}
 pub const ETH_MQ_RX_RSS_FLAG: u32 = 1;
 pub const ETH_MQ_RX_DCB_FLAG: u32 = 2;
 pub const ETH_MQ_RX_VMDQ_FLAG: u32 = 4;
@@ -309,13 +580,15 @@ impl Default for rte_eth_rxmode {
 impl rte_eth_rxmode {
     #[inline]
     pub fn header_split(&self) -> u16 {
-        unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u16) }
+        unsafe {
+            ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u16)
+        }
     }
     #[inline]
     pub fn set_header_split(&mut self, val: u16) {
         unsafe {
             let val: u16 = ::std::mem::transmute(val);
-            self._bitfield_1.set(0usize, 1u8, val as u64)
+            self._bitfield_1.set_const::<0usize, 1u8>(val as u64)
         }
     }
     #[inline]
@@ -324,8 +597,10 @@ impl rte_eth_rxmode {
         ::std::mem::transmute(
             <__BindgenBitfieldUnit<
                 [u8; 2usize],
-            >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8)
-                as u16,
+            >>::raw_get_const::<
+                0usize,
+                1u8,
+            >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16,
         )
     }
 }
@@ -335,23 +610,23 @@ impl rte_eth_rxmode {
         let val: u16 = ::std::mem::transmute(val);
         <__BindgenBitfieldUnit<
             [u8; 2usize],
-        >>::raw_set(
-            ::std::ptr::addr_of_mut!((*this)._bitfield_1),
+        >>::raw_set_const::<
             0usize,
             1u8,
-            val as u64,
-        )
+        >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64)
     }
 }
 #[inline]
 pub fn hw_ip_checksum(&self) -> u16 {
-    unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u16) }
+    unsafe {
+
::std::mem::transmute(self._bitfield_1.get_const::<1usize, 1u8>() as u16) + } } #[inline] pub fn set_hw_ip_checksum(&mut self, val: u16) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(1usize, 1u8, val as u64) + self._bitfield_1.set_const::<1usize, 1u8>(val as u64) } } #[inline] @@ -360,8 +635,10 @@ impl rte_eth_rxmode { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 1u8) - as u16, + >>::raw_get_const::< + 1usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -371,23 +648,23 @@ impl rte_eth_rxmode { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn hw_vlan_filter(&self) -> u16 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<2usize, 1u8>() as u16) + } } #[inline] pub fn set_hw_vlan_filter(&mut self, val: u16) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(2usize, 1u8, val as u64) + self._bitfield_1.set_const::<2usize, 1u8>(val as u64) } } #[inline] @@ -396,8 +673,10 @@ impl rte_eth_rxmode { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 2usize, 1u8) - as u16, + >>::raw_get_const::< + 2usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -407,23 +686,23 @@ impl rte_eth_rxmode { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 2usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn hw_vlan_strip(&self) -> u16 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(3usize, 1u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<3usize, 1u8>() as u16) + } } #[inline] pub fn set_hw_vlan_strip(&mut self, val: u16) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(3usize, 1u8, val as u64) + self._bitfield_1.set_const::<3usize, 1u8>(val as u64) } } #[inline] @@ -432,8 +711,10 @@ impl rte_eth_rxmode { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 3usize, 1u8) - as u16, + >>::raw_get_const::< + 3usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -443,23 +724,23 @@ impl rte_eth_rxmode { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 3usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn hw_vlan_extend(&self) -> u16 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(4usize, 1u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<4usize, 1u8>() as u16) + } } #[inline] pub fn set_hw_vlan_extend(&mut self, val: u16) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(4usize, 1u8, val as u64) + self._bitfield_1.set_const::<4usize, 1u8>(val as u64) } } #[inline] @@ -468,8 +749,10 @@ impl rte_eth_rxmode { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - 
>>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 4usize, 1u8) - as u16, + >>::raw_get_const::< + 4usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -479,23 +762,23 @@ impl rte_eth_rxmode { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 4usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn jumbo_frame(&self) -> u16 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(5usize, 1u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<5usize, 1u8>() as u16) + } } #[inline] pub fn set_jumbo_frame(&mut self, val: u16) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(5usize, 1u8, val as u64) + self._bitfield_1.set_const::<5usize, 1u8>(val as u64) } } #[inline] @@ -504,8 +787,10 @@ impl rte_eth_rxmode { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 5usize, 1u8) - as u16, + >>::raw_get_const::< + 5usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -515,23 +800,23 @@ impl rte_eth_rxmode { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 5usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn hw_strip_crc(&self) -> u16 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(6usize, 1u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<6usize, 1u8>() as u16) + } } #[inline] pub fn set_hw_strip_crc(&mut self, val: u16) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(6usize, 1u8, val as u64) + self._bitfield_1.set_const::<6usize, 1u8>(val as u64) } } #[inline] @@ -540,8 +825,10 @@ impl rte_eth_rxmode { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 6usize, 1u8) - as u16, + >>::raw_get_const::< + 6usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -551,23 +838,23 @@ impl rte_eth_rxmode { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 6usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn enable_scatter(&self) -> u16 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(7usize, 1u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<7usize, 1u8>() as u16) + } } #[inline] pub fn set_enable_scatter(&mut self, val: u16) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(7usize, 1u8, val as u64) + self._bitfield_1.set_const::<7usize, 1u8>(val as u64) } } #[inline] @@ -576,8 +863,10 @@ impl rte_eth_rxmode { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 7usize, 1u8) - as u16, + >>::raw_get_const::< + 7usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -587,23 +876,23 @@ impl rte_eth_rxmode { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 7usize, 1u8, - val as u64, - ) + 
>(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn enable_lro(&self) -> u16 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(8usize, 1u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<8usize, 1u8>() as u16) + } } #[inline] pub fn set_enable_lro(&mut self, val: u16) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(8usize, 1u8, val as u64) + self._bitfield_1.set_const::<8usize, 1u8>(val as u64) } } #[inline] @@ -612,8 +901,10 @@ impl rte_eth_rxmode { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 8usize, 1u8) - as u16, + >>::raw_get_const::< + 8usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -623,12 +914,10 @@ impl rte_eth_rxmode { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 8usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -645,100 +934,85 @@ impl rte_eth_rxmode { ) -> __BindgenBitfieldUnit<[u8; 2usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 2usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let header_split: u16 = unsafe { - ::std::mem::transmute(header_split) - }; - header_split as u64 - }, - ); + >({ + let header_split: u16 = unsafe { ::std::mem::transmute(header_split) }; + header_split as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 1u8, - { - let hw_ip_checksum: u16 = unsafe { - ::std::mem::transmute(hw_ip_checksum) - }; - hw_ip_checksum as u64 - }, - ); + >({ + let hw_ip_checksum: u16 = unsafe { + ::std::mem::transmute(hw_ip_checksum) + }; + hw_ip_checksum as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 2usize, 1u8, - { - let hw_vlan_filter: u16 = unsafe { - ::std::mem::transmute(hw_vlan_filter) - }; - hw_vlan_filter as u64 - }, - ); + >({ + let hw_vlan_filter: u16 = unsafe { + ::std::mem::transmute(hw_vlan_filter) + }; + hw_vlan_filter as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 3usize, 1u8, - { - let hw_vlan_strip: u16 = unsafe { - ::std::mem::transmute(hw_vlan_strip) - }; - hw_vlan_strip as u64 - }, - ); + >({ + let hw_vlan_strip: u16 = unsafe { ::std::mem::transmute(hw_vlan_strip) }; + hw_vlan_strip as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 4usize, 1u8, - { - let hw_vlan_extend: u16 = unsafe { - ::std::mem::transmute(hw_vlan_extend) - }; - hw_vlan_extend as u64 - }, - ); + >({ + let hw_vlan_extend: u16 = unsafe { + ::std::mem::transmute(hw_vlan_extend) + }; + hw_vlan_extend as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 5usize, 1u8, - { - let jumbo_frame: u16 = unsafe { ::std::mem::transmute(jumbo_frame) }; - jumbo_frame as u64 - }, - ); + >({ + let jumbo_frame: u16 = unsafe { ::std::mem::transmute(jumbo_frame) }; + jumbo_frame as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 6usize, 1u8, - { - let hw_strip_crc: u16 = unsafe { - ::std::mem::transmute(hw_strip_crc) - }; - hw_strip_crc as u64 - }, - ); + >({ + let hw_strip_crc: u16 = unsafe { ::std::mem::transmute(hw_strip_crc) }; + hw_strip_crc as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 7usize, 1u8, - { - let enable_scatter: u16 = unsafe { - ::std::mem::transmute(enable_scatter) - }; - enable_scatter as u64 - }, - ); + >({ + let enable_scatter: u16 = unsafe { + 
::std::mem::transmute(enable_scatter) + }; + enable_scatter as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 8usize, 1u8, - { - let enable_lro: u16 = unsafe { ::std::mem::transmute(enable_lro) }; - enable_lro as u64 - }, - ); + >({ + let enable_lro: u16 = unsafe { ::std::mem::transmute(enable_lro) }; + enable_lro as u64 + }); __bindgen_bitfield_unit } } @@ -789,13 +1063,15 @@ impl Default for rte_eth_txmode { impl rte_eth_txmode { #[inline] pub fn hw_vlan_reject_tagged(&self) -> u8 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u8) + } } #[inline] pub fn set_hw_vlan_reject_tagged(&mut self, val: u8) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -804,7 +1080,10 @@ impl rte_eth_txmode { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) as u8, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -814,23 +1093,23 @@ impl rte_eth_txmode { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn hw_vlan_reject_untagged(&self) -> u8 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 1u8>() as u8) + } } #[inline] pub fn set_hw_vlan_reject_untagged(&mut self, val: u8) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(1usize, 1u8, val as u64) + self._bitfield_1.set_const::<1usize, 1u8>(val as u64) } } #[inline] @@ -839,7 +1118,10 @@ impl rte_eth_txmode { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 1u8) as u8, + >>::raw_get_const::< + 1usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -849,23 +1131,23 @@ impl rte_eth_txmode { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn hw_vlan_insert_pvid(&self) -> u8 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<2usize, 1u8>() as u8) + } } #[inline] pub fn set_hw_vlan_insert_pvid(&mut self, val: u8) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(2usize, 1u8, val as u64) + self._bitfield_1.set_const::<2usize, 1u8>(val as u64) } } #[inline] @@ -874,7 +1156,10 @@ impl rte_eth_txmode { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 2usize, 1u8) as u8, + >>::raw_get_const::< + 2usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -884,12 +1169,10 @@ impl rte_eth_txmode { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 2usize, 1u8, - val as u64, - ) + 
+        >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64)
         }
     }
     #[inline]
@@ -900,38 +1183,35 @@ impl rte_eth_txmode {
     ) -> __BindgenBitfieldUnit<[u8; 1usize]> {
         let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default();
         __bindgen_bitfield_unit
-            .set(
+            .set_const::<
                 0usize,
                 1u8,
-                {
-                    let hw_vlan_reject_tagged: u8 = unsafe {
-                        ::std::mem::transmute(hw_vlan_reject_tagged)
-                    };
-                    hw_vlan_reject_tagged as u64
-                },
-            );
+            >({
+                let hw_vlan_reject_tagged: u8 = unsafe {
+                    ::std::mem::transmute(hw_vlan_reject_tagged)
+                };
+                hw_vlan_reject_tagged as u64
+            });
         __bindgen_bitfield_unit
-            .set(
+            .set_const::<
                 1usize,
                 1u8,
-                {
-                    let hw_vlan_reject_untagged: u8 = unsafe {
-                        ::std::mem::transmute(hw_vlan_reject_untagged)
-                    };
-                    hw_vlan_reject_untagged as u64
-                },
-            );
+            >({
+                let hw_vlan_reject_untagged: u8 = unsafe {
+                    ::std::mem::transmute(hw_vlan_reject_untagged)
+                };
+                hw_vlan_reject_untagged as u64
+            });
         __bindgen_bitfield_unit
-            .set(
+            .set_const::<
                 2usize,
                 1u8,
-                {
-                    let hw_vlan_insert_pvid: u8 = unsafe {
-                        ::std::mem::transmute(hw_vlan_insert_pvid)
-                    };
-                    hw_vlan_insert_pvid as u64
-                },
-            );
+            >({
+                let hw_vlan_insert_pvid: u8 = unsafe {
+                    ::std::mem::transmute(hw_vlan_insert_pvid)
+                };
+                hw_vlan_insert_pvid as u64
+            });
         __bindgen_bitfield_unit
     }
 }
diff --git a/bindgen-tests/tests/expectations/tests/layout_mbuf.rs b/bindgen-tests/tests/expectations/tests/layout_mbuf.rs
index ee723e601e..270a22a0c0 100644
--- a/bindgen-tests/tests/expectations/tests/layout_mbuf.rs
+++ b/bindgen-tests/tests/expectations/tests/layout_mbuf.rs
@@ -217,6 +217,277 @@ where
         }
     }
 }
+/// Const-generic methods for efficient bitfield access when offset and width
+/// are known at compile time.
+impl<const N: usize> __BindgenBitfieldUnit<[u8; N]> {
+    /// Get a field using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub const fn get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&self) -> u64 {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return 0;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = 0usize;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i].reverse_bits() as usize) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i] as usize) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val as u64
+        } else {
+            let mut val = 0u64;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i].reverse_bits() as u64) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i] as u64) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val
+        }
+    }
+    /// Set a field using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
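+    ///
+    /// Bits of `val` above `BIT_WIDTH` are masked off before the store, so
+    /// oversized values are truncated to the field width.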
+    #[inline]
+    pub fn set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&mut self, val: u64) {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = val as usize;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift;
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                if cfg!(target_endian = "big") {
+                    let byte = self.storage[start_byte + i].reverse_bits();
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    self.storage[start_byte + i] = new_byte.reverse_bits();
+                } else {
+                    self.storage[start_byte + i] = (self.storage[start_byte + i]
+                        & !byte_mask) | (byte_val & byte_mask);
+                }
+                i += 1;
+            }
+        } else {
+            let mut val = val;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 {
+                !0u64 << bit_shift
+            } else {
+                ((1u64 << BIT_WIDTH) - 1) << bit_shift
+            };
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                if cfg!(target_endian = "big") {
+                    let byte = self.storage[start_byte + i].reverse_bits();
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    self.storage[start_byte + i] = new_byte.reverse_bits();
+                } else {
+                    self.storage[start_byte + i] = (self.storage[start_byte + i]
+                        & !byte_mask) | (byte_val & byte_mask);
+                }
+                i += 1;
+            }
+        }
+    }
+    /// Raw pointer get using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
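+    ///
+    /// # Safety
+    ///
+    /// `this` must point to a valid, readable `__BindgenBitfieldUnit<[u8; N]>`;
+    /// the field selected by `BIT_OFFSET` and `BIT_WIDTH` must lie entirely
+    /// within the `N`-byte storage.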
+    #[inline]
+    pub const unsafe fn raw_get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        this: *const Self,
+    ) -> u64 {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return 0;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 };
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = 0usize;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte.reverse_bits() as usize) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte as usize) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val as u64
+        } else {
+            let mut val = 0u64;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte.reverse_bits() as u64) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte as u64) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val
+        }
+    }
+    /// Raw pointer set using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
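+    ///
+    /// # Safety
+    ///
+    /// `this` must point to a valid `__BindgenBitfieldUnit<[u8; N]>` that is
+    /// readable and writable; the field selected by `BIT_OFFSET` and
+    /// `BIT_WIDTH` must lie entirely within the `N`-byte storage.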
+    #[inline]
+    pub unsafe fn raw_set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        this: *mut Self,
+        val: u64,
+    ) {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        let storage_ptr = this.cast::<[u8; N]>().cast::<u8>();
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = val as usize;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift;
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                let byte_ptr = unsafe { storage_ptr.add(start_byte + i) };
+                if cfg!(target_endian = "big") {
+                    let byte = unsafe { (*byte_ptr).reverse_bits() };
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    unsafe { *byte_ptr = new_byte.reverse_bits() };
+                } else {
+                    unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) };
+                }
+                i += 1;
+            }
+        } else {
+            let mut val = val;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 {
+                !0u64 << bit_shift
+            } else {
+                ((1u64 << BIT_WIDTH) - 1) << bit_shift
+            };
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                let byte_ptr = unsafe { storage_ptr.add(start_byte + i) };
+                if cfg!(target_endian = "big") {
+                    let byte = unsafe { (*byte_ptr).reverse_bits() };
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    unsafe { *byte_ptr = new_byte.reverse_bits() };
+                } else {
+                    unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) };
+                }
+                i += 1;
+            }
+        }
+    }
+}
 pub const RTE_CACHE_LINE_MIN_SIZE: u32 = 64;
 pub const RTE_CACHE_LINE_SIZE: u32 = 64;
 pub type phys_addr_t = u64;
@@ -348,13 +619,15 @@ const _: () = {
 impl rte_mbuf__bindgen_ty_2__bindgen_ty_1 {
     #[inline]
     pub fn l2_type(&self) -> u32 {
-        unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 4u8) as u32) }
+        unsafe {
+            ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 4u8>() as u32)
+        }
     }
     #[inline]
     pub fn set_l2_type(&mut self, val: u32) {
         unsafe {
             let val: u32 = ::std::mem::transmute(val);
-            self._bitfield_1.set(0usize, 4u8, val as u64)
+            self._bitfield_1.set_const::<0usize, 4u8>(val as u64)
         }
     }
     #[inline]
@@ -363,8 +636,10 @@ impl rte_mbuf__bindgen_ty_2__bindgen_ty_1 {
         ::std::mem::transmute(
             <__BindgenBitfieldUnit<
                 [u8; 4usize],
-            >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 4u8)
-                as u32,
+            >>::raw_get_const::<
+                0usize,
+                4u8,
+            >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32,
         )
     }
 }
@@ -374,23 +649,23 @@ impl rte_mbuf__bindgen_ty_2__bindgen_ty_1 {
         let val: u32 = ::std::mem::transmute(val);
         <__BindgenBitfieldUnit<
             [u8; 4usize],
-        >>::raw_set(
-            ::std::ptr::addr_of_mut!((*this)._bitfield_1),
+        >>::raw_set_const::<
             0usize,
             4u8,
-            val as u64,
-        )
+        >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64)
     }
 }
 #[inline]
 pub fn l3_type(&self) -> u32 {
-    unsafe { ::std::mem::transmute(self._bitfield_1.get(4usize, 4u8) as u32) }
+    unsafe {
+
::std::mem::transmute(self._bitfield_1.get_const::<4usize, 4u8>() as u32) + } } #[inline] pub fn set_l3_type(&mut self, val: u32) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(4usize, 4u8, val as u64) + self._bitfield_1.set_const::<4usize, 4u8>(val as u64) } } #[inline] @@ -399,8 +674,10 @@ impl rte_mbuf__bindgen_ty_2__bindgen_ty_1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 4usize, 4u8) - as u32, + >>::raw_get_const::< + 4usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -410,23 +687,23 @@ impl rte_mbuf__bindgen_ty_2__bindgen_ty_1 { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 4usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn l4_type(&self) -> u32 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(8usize, 4u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<8usize, 4u8>() as u32) + } } #[inline] pub fn set_l4_type(&mut self, val: u32) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(8usize, 4u8, val as u64) + self._bitfield_1.set_const::<8usize, 4u8>(val as u64) } } #[inline] @@ -435,8 +712,10 @@ impl rte_mbuf__bindgen_ty_2__bindgen_ty_1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 8usize, 4u8) - as u32, + >>::raw_get_const::< + 8usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -446,23 +725,23 @@ impl rte_mbuf__bindgen_ty_2__bindgen_ty_1 { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 8usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn tun_type(&self) -> u32 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(12usize, 4u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<12usize, 4u8>() as u32) + } } #[inline] pub fn set_tun_type(&mut self, val: u32) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(12usize, 4u8, val as u64) + self._bitfield_1.set_const::<12usize, 4u8>(val as u64) } } #[inline] @@ -471,8 +750,10 @@ impl rte_mbuf__bindgen_ty_2__bindgen_ty_1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 12usize, 4u8) - as u32, + >>::raw_get_const::< + 12usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -482,23 +763,23 @@ impl rte_mbuf__bindgen_ty_2__bindgen_ty_1 { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 12usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn inner_l2_type(&self) -> u32 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(16usize, 4u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<16usize, 4u8>() as u32) + } } #[inline] pub fn set_inner_l2_type(&mut self, val: u32) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(16usize, 4u8, val as u64) + self._bitfield_1.set_const::<16usize, 4u8>(val as u64) } } #[inline] @@ -507,8 
+788,10 @@ impl rte_mbuf__bindgen_ty_2__bindgen_ty_1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 16usize, 4u8) - as u32, + >>::raw_get_const::< + 16usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -518,23 +801,23 @@ impl rte_mbuf__bindgen_ty_2__bindgen_ty_1 { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 16usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn inner_l3_type(&self) -> u32 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(20usize, 4u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<20usize, 4u8>() as u32) + } } #[inline] pub fn set_inner_l3_type(&mut self, val: u32) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(20usize, 4u8, val as u64) + self._bitfield_1.set_const::<20usize, 4u8>(val as u64) } } #[inline] @@ -543,8 +826,10 @@ impl rte_mbuf__bindgen_ty_2__bindgen_ty_1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 20usize, 4u8) - as u32, + >>::raw_get_const::< + 20usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -554,23 +839,23 @@ impl rte_mbuf__bindgen_ty_2__bindgen_ty_1 { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 20usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn inner_l4_type(&self) -> u32 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(24usize, 4u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<24usize, 4u8>() as u32) + } } #[inline] pub fn set_inner_l4_type(&mut self, val: u32) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(24usize, 4u8, val as u64) + self._bitfield_1.set_const::<24usize, 4u8>(val as u64) } } #[inline] @@ -579,8 +864,10 @@ impl rte_mbuf__bindgen_ty_2__bindgen_ty_1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 24usize, 4u8) - as u32, + >>::raw_get_const::< + 24usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -590,12 +877,10 @@ impl rte_mbuf__bindgen_ty_2__bindgen_ty_1 { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 24usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -610,74 +895,61 @@ impl rte_mbuf__bindgen_ty_2__bindgen_ty_1 { ) -> __BindgenBitfieldUnit<[u8; 4usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 4u8, - { - let l2_type: u32 = unsafe { ::std::mem::transmute(l2_type) }; - l2_type as u64 - }, - ); + >({ + let l2_type: u32 = unsafe { ::std::mem::transmute(l2_type) }; + l2_type as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 4usize, 4u8, - { - let l3_type: u32 = unsafe { ::std::mem::transmute(l3_type) }; - l3_type as u64 - }, - ); + >({ + let l3_type: u32 = unsafe { ::std::mem::transmute(l3_type) }; + l3_type as u64 + }); __bindgen_bitfield_unit - .set( 
+ .set_const::< 8usize, 4u8, - { - let l4_type: u32 = unsafe { ::std::mem::transmute(l4_type) }; - l4_type as u64 - }, - ); + >({ + let l4_type: u32 = unsafe { ::std::mem::transmute(l4_type) }; + l4_type as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 12usize, 4u8, - { - let tun_type: u32 = unsafe { ::std::mem::transmute(tun_type) }; - tun_type as u64 - }, - ); + >({ + let tun_type: u32 = unsafe { ::std::mem::transmute(tun_type) }; + tun_type as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 16usize, 4u8, - { - let inner_l2_type: u32 = unsafe { - ::std::mem::transmute(inner_l2_type) - }; - inner_l2_type as u64 - }, - ); + >({ + let inner_l2_type: u32 = unsafe { ::std::mem::transmute(inner_l2_type) }; + inner_l2_type as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 20usize, 4u8, - { - let inner_l3_type: u32 = unsafe { - ::std::mem::transmute(inner_l3_type) - }; - inner_l3_type as u64 - }, - ); + >({ + let inner_l3_type: u32 = unsafe { ::std::mem::transmute(inner_l3_type) }; + inner_l3_type as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 24usize, 4u8, - { - let inner_l4_type: u32 = unsafe { - ::std::mem::transmute(inner_l4_type) - }; - inner_l4_type as u64 - }, - ); + >({ + let inner_l4_type: u32 = unsafe { ::std::mem::transmute(inner_l4_type) }; + inner_l4_type as u64 + }); __bindgen_bitfield_unit } } @@ -909,13 +1181,15 @@ const _: () = { impl rte_mbuf__bindgen_ty_5__bindgen_ty_1 { #[inline] pub fn l2_len(&self) -> u64 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 7u8) as u64) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 7u8>() as u64) + } } #[inline] pub fn set_l2_len(&mut self, val: u64) { unsafe { let val: u64 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 7u8, val as u64) + self._bitfield_1.set_const::<0usize, 7u8>(val as u64) } } #[inline] @@ -924,8 +1198,10 @@ impl rte_mbuf__bindgen_ty_5__bindgen_ty_1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 7usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 7u8) - as u64, + >>::raw_get_const::< + 0usize, + 7u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u64, ) } } @@ -935,23 +1211,23 @@ impl rte_mbuf__bindgen_ty_5__bindgen_ty_1 { let val: u64 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 7usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 7u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn l3_len(&self) -> u64 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(7usize, 9u8) as u64) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<7usize, 9u8>() as u64) + } } #[inline] pub fn set_l3_len(&mut self, val: u64) { unsafe { let val: u64 = ::std::mem::transmute(val); - self._bitfield_1.set(7usize, 9u8, val as u64) + self._bitfield_1.set_const::<7usize, 9u8>(val as u64) } } #[inline] @@ -960,8 +1236,10 @@ impl rte_mbuf__bindgen_ty_5__bindgen_ty_1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 7usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 7usize, 9u8) - as u64, + >>::raw_get_const::< + 7usize, + 9u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u64, ) } } @@ -971,23 +1249,23 @@ impl rte_mbuf__bindgen_ty_5__bindgen_ty_1 { let val: u64 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 7usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 7usize, 9u8, - val as u64, - ) + 
>(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn l4_len(&self) -> u64 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(16usize, 8u8) as u64) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<16usize, 8u8>() as u64) + } } #[inline] pub fn set_l4_len(&mut self, val: u64) { unsafe { let val: u64 = ::std::mem::transmute(val); - self._bitfield_1.set(16usize, 8u8, val as u64) + self._bitfield_1.set_const::<16usize, 8u8>(val as u64) } } #[inline] @@ -996,8 +1274,10 @@ impl rte_mbuf__bindgen_ty_5__bindgen_ty_1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 7usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 16usize, 8u8) - as u64, + >>::raw_get_const::< + 16usize, + 8u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u64, ) } } @@ -1007,23 +1287,23 @@ impl rte_mbuf__bindgen_ty_5__bindgen_ty_1 { let val: u64 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 7usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 16usize, 8u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn tso_segsz(&self) -> u64 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(24usize, 16u8) as u64) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<24usize, 16u8>() as u64) + } } #[inline] pub fn set_tso_segsz(&mut self, val: u64) { unsafe { let val: u64 = ::std::mem::transmute(val); - self._bitfield_1.set(24usize, 16u8, val as u64) + self._bitfield_1.set_const::<24usize, 16u8>(val as u64) } } #[inline] @@ -1032,8 +1312,10 @@ impl rte_mbuf__bindgen_ty_5__bindgen_ty_1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 7usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 24usize, 16u8) - as u64, + >>::raw_get_const::< + 24usize, + 16u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u64, ) } } @@ -1043,23 +1325,23 @@ impl rte_mbuf__bindgen_ty_5__bindgen_ty_1 { let val: u64 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 7usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 24usize, 16u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn outer_l3_len(&self) -> u64 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(40usize, 9u8) as u64) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<40usize, 9u8>() as u64) + } } #[inline] pub fn set_outer_l3_len(&mut self, val: u64) { unsafe { let val: u64 = ::std::mem::transmute(val); - self._bitfield_1.set(40usize, 9u8, val as u64) + self._bitfield_1.set_const::<40usize, 9u8>(val as u64) } } #[inline] @@ -1068,8 +1350,10 @@ impl rte_mbuf__bindgen_ty_5__bindgen_ty_1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 7usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 40usize, 9u8) - as u64, + >>::raw_get_const::< + 40usize, + 9u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u64, ) } } @@ -1079,23 +1363,23 @@ impl rte_mbuf__bindgen_ty_5__bindgen_ty_1 { let val: u64 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 7usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 40usize, 9u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn outer_l2_len(&self) -> u64 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(49usize, 7u8) as u64) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<49usize, 7u8>() as u64) + } } 
 #[inline]
 pub fn set_outer_l2_len(&mut self, val: u64) {
     unsafe {
         let val: u64 = ::std::mem::transmute(val);
-        self._bitfield_1.set(49usize, 7u8, val as u64)
+        self._bitfield_1.set_const::<49usize, 7u8>(val as u64)
     }
 }
 #[inline]
@@ -1104,8 +1388,10 @@ impl rte_mbuf__bindgen_ty_5__bindgen_ty_1 {
         ::std::mem::transmute(
             <__BindgenBitfieldUnit<
                 [u8; 7usize],
-            >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 49usize, 7u8)
-                as u64,
+            >>::raw_get_const::<
+                49usize,
+                7u8,
+            >(::std::ptr::addr_of!((*this)._bitfield_1)) as u64,
         )
     }
 }
@@ -1115,12 +1401,10 @@ impl rte_mbuf__bindgen_ty_5__bindgen_ty_1 {
         let val: u64 = ::std::mem::transmute(val);
         <__BindgenBitfieldUnit<
             [u8; 7usize],
-        >>::raw_set(
-            ::std::ptr::addr_of_mut!((*this)._bitfield_1),
+        >>::raw_set_const::<
             49usize,
             7u8,
-            val as u64,
-        )
+        >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64)
     }
 }
 #[inline]
@@ -1134,63 +1418,53 @@ impl rte_mbuf__bindgen_ty_5__bindgen_ty_1 {
     ) -> __BindgenBitfieldUnit<[u8; 7usize]> {
         let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 7usize]> = Default::default();
         __bindgen_bitfield_unit
-            .set(
+            .set_const::<
                 0usize,
                 7u8,
-                {
-                    let l2_len: u64 = unsafe { ::std::mem::transmute(l2_len) };
-                    l2_len as u64
-                },
-            );
+            >({
+                let l2_len: u64 = unsafe { ::std::mem::transmute(l2_len) };
+                l2_len as u64
+            });
         __bindgen_bitfield_unit
-            .set(
+            .set_const::<
                 7usize,
                 9u8,
-                {
-                    let l3_len: u64 = unsafe { ::std::mem::transmute(l3_len) };
-                    l3_len as u64
-                },
-            );
+            >({
+                let l3_len: u64 = unsafe { ::std::mem::transmute(l3_len) };
+                l3_len as u64
+            });
         __bindgen_bitfield_unit
-            .set(
+            .set_const::<
                 16usize,
                 8u8,
-                {
-                    let l4_len: u64 = unsafe { ::std::mem::transmute(l4_len) };
-                    l4_len as u64
-                },
-            );
+            >({
+                let l4_len: u64 = unsafe { ::std::mem::transmute(l4_len) };
+                l4_len as u64
+            });
         __bindgen_bitfield_unit
-            .set(
+            .set_const::<
                 24usize,
                 16u8,
-                {
-                    let tso_segsz: u64 = unsafe { ::std::mem::transmute(tso_segsz) };
-                    tso_segsz as u64
-                },
-            );
+            >({
+                let tso_segsz: u64 = unsafe { ::std::mem::transmute(tso_segsz) };
+                tso_segsz as u64
+            });
         __bindgen_bitfield_unit
-            .set(
+            .set_const::<
                 40usize,
                 9u8,
-                {
-                    let outer_l3_len: u64 = unsafe {
-                        ::std::mem::transmute(outer_l3_len)
-                    };
-                    outer_l3_len as u64
-                },
-            );
+            >({
+                let outer_l3_len: u64 = unsafe { ::std::mem::transmute(outer_l3_len) };
+                outer_l3_len as u64
+            });
         __bindgen_bitfield_unit
-            .set(
+            .set_const::<
                 49usize,
                 7u8,
-                {
-                    let outer_l2_len: u64 = unsafe {
-                        ::std::mem::transmute(outer_l2_len)
-                    };
-                    outer_l2_len as u64
-                },
-            );
+            >({
+                let outer_l2_len: u64 = unsafe { ::std::mem::transmute(outer_l2_len) };
+                outer_l2_len as u64
+            });
         __bindgen_bitfield_unit
     }
 }
diff --git a/bindgen-tests/tests/expectations/tests/only_bitfields.rs b/bindgen-tests/tests/expectations/tests/only_bitfields.rs
index bb040c0faf..6e25fd3a1e 100644
--- a/bindgen-tests/tests/expectations/tests/only_bitfields.rs
+++ b/bindgen-tests/tests/expectations/tests/only_bitfields.rs
@@ -217,6 +217,277 @@ where
         }
     }
 }
+/// Const-generic methods for efficient bitfield access when offset and width
+/// are known at compile time.
+impl<const N: usize> __BindgenBitfieldUnit<[u8; N]> {
+    /// Get a field using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
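+    ///
+    /// Illustrative doc example (assumes a little-endian target; `new` is the
+    /// unit's ordinary constructor):
+    ///
+    /// ```ignore
+    /// let unit = __BindgenBitfieldUnit::new([0xABu8, 0x00]);
+    /// // The 4-bit field at bit offset 4 occupies the high nibble of byte 0.
+    /// assert_eq!(unit.get_const::<4, 4>(), 0xA);
+    /// ```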
+    #[inline]
+    pub const fn get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&self) -> u64 {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return 0;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = 0usize;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i].reverse_bits() as usize) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i] as usize) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val as u64
+        } else {
+            let mut val = 0u64;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i].reverse_bits() as u64) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i] as u64) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val
+        }
+    }
+    /// Set a field using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub fn set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&mut self, val: u64) {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = val as usize;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift;
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                if cfg!(target_endian = "big") {
+                    let byte = self.storage[start_byte + i].reverse_bits();
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    self.storage[start_byte + i] = new_byte.reverse_bits();
+                } else {
+                    self.storage[start_byte + i] = (self.storage[start_byte + i]
+                        & !byte_mask) | (byte_val & byte_mask);
+                }
+                i += 1;
+            }
+        } else {
+            let mut val = val;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 {
+                !0u64 << bit_shift
+            } else {
+                ((1u64 << BIT_WIDTH) - 1) << bit_shift
+            };
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                if cfg!(target_endian = "big") {
+                    let byte = self.storage[start_byte + i].reverse_bits();
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    self.storage[start_byte + i] = new_byte.reverse_bits();
+                } else {
+                    self.storage[start_byte + i] = (self.storage[start_byte + i]
+                        & !byte_mask) | (byte_val & byte_mask);
+                }
+                i += 1;
+            }
+        }
+    }
+    /// Raw pointer get using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub const unsafe fn raw_get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        this: *const Self,
+    ) -> u64 {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return 0;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 };
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = 0usize;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte.reverse_bits() as usize) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte as usize) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val as u64
+        } else {
+            let mut val = 0u64;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte.reverse_bits() as u64) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte as u64) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val
+        }
+    }
+    /// Raw pointer set using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
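+    ///
+    /// # Safety
+    ///
+    /// `this` must point to a valid `__BindgenBitfieldUnit<[u8; N]>` that is
+    /// readable and writable; the field selected by `BIT_OFFSET` and
+    /// `BIT_WIDTH` must lie entirely within the `N`-byte storage.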
+    #[inline]
+    pub unsafe fn raw_set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        this: *mut Self,
+        val: u64,
+    ) {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        let storage_ptr = this.cast::<[u8; N]>().cast::<u8>();
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = val as usize;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift;
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                let byte_ptr = unsafe { storage_ptr.add(start_byte + i) };
+                if cfg!(target_endian = "big") {
+                    let byte = unsafe { (*byte_ptr).reverse_bits() };
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    unsafe { *byte_ptr = new_byte.reverse_bits() };
+                } else {
+                    unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) };
+                }
+                i += 1;
+            }
+        } else {
+            let mut val = val;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 {
+                !0u64 << bit_shift
+            } else {
+                ((1u64 << BIT_WIDTH) - 1) << bit_shift
+            };
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                let byte_ptr = unsafe { storage_ptr.add(start_byte + i) };
+                if cfg!(target_endian = "big") {
+                    let byte = unsafe { (*byte_ptr).reverse_bits() };
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    unsafe { *byte_ptr = new_byte.reverse_bits() };
+                } else {
+                    unsafe { *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) };
+                }
+                i += 1;
+            }
+        }
+    }
+}
 #[repr(C)]
 #[derive(Debug, Default, Copy, Clone)]
 pub struct C {
@@ -230,13 +501,15 @@ const _: () = {
 impl C {
     #[inline]
     pub fn a(&self) -> bool {
-        unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u8) }
+        unsafe {
+            ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u8)
+        }
     }
     #[inline]
     pub fn set_a(&mut self, val: bool) {
         unsafe {
             let val: u8 = ::std::mem::transmute(val);
-            self._bitfield_1.set(0usize, 1u8, val as u64)
+            self._bitfield_1.set_const::<0usize, 1u8>(val as u64)
         }
     }
     #[inline]
@@ -245,7 +518,10 @@ impl C {
         ::std::mem::transmute(
             <__BindgenBitfieldUnit<
                 [u8; 1usize],
-            >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) as u8,
+            >>::raw_get_const::<
+                0usize,
+                1u8,
+            >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8,
         )
     }
 }
@@ -255,23 +531,23 @@ impl C {
         let val: u8 = ::std::mem::transmute(val);
         <__BindgenBitfieldUnit<
             [u8; 1usize],
-        >>::raw_set(
-            ::std::ptr::addr_of_mut!((*this)._bitfield_1),
+        >>::raw_set_const::<
             0usize,
             1u8,
-            val as u64,
-        )
+        >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64)
     }
 }
 #[inline]
 pub fn b(&self) -> bool {
-    unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 7u8) as u8) }
+    unsafe {
+        ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 7u8>() as u8)
+    }
 }
 #[inline]
 pub fn set_b(&mut self, val: bool) {
     unsafe {
         let val: u8 = ::std::mem::transmute(val);
-        self._bitfield_1.set(1usize, 7u8, val as u64)
+        self._bitfield_1.set_const::<1usize, 7u8>(val as u64)
     }
 }
 #[inline]
@@ -280,7 +556,10 @@ impl C {
         ::std::mem::transmute(
             <__BindgenBitfieldUnit<
                 [u8; 1usize],
-            >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 7u8) as u8,
+            >>::raw_get_const::<
+                1usize,
+                7u8,
+            >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8,
         )
     }
 }
@@ -290,35 +569,31 @@ impl C {
         let val: u8 = ::std::mem::transmute(val);
         <__BindgenBitfieldUnit<
             [u8; 1usize],
-        >>::raw_set(
-            ::std::ptr::addr_of_mut!((*this)._bitfield_1),
+        >>::raw_set_const::<
             1usize,
             7u8,
-            val as u64,
-        )
+        >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64)
     }
 }
 #[inline]
 pub fn new_bitfield_1(a: bool, b: bool) -> __BindgenBitfieldUnit<[u8; 1usize]> {
     let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default();
     __bindgen_bitfield_unit
-        .set(
+        .set_const::<
             0usize,
             1u8,
-            {
-                let a: u8 = unsafe { ::std::mem::transmute(a) };
-                a as u64
-            },
-        );
+        >({
+            let a: u8 = unsafe { ::std::mem::transmute(a) };
+            a as u64
+        });
     __bindgen_bitfield_unit
-        .set(
+        .set_const::<
             1usize,
             7u8,
-            {
-                let b: u8 = unsafe { ::std::mem::transmute(b) };
-                b as u64
-            },
-        );
+        >({
+            let b: u8 = unsafe { ::std::mem::transmute(b) };
+            b as u64
+        });
     __bindgen_bitfield_unit
 }
 }
diff --git a/bindgen-tests/tests/expectations/tests/packed-bitfield.rs b/bindgen-tests/tests/expectations/tests/packed-bitfield.rs
index 39477c1366..a72a42bb11 100644
--- a/bindgen-tests/tests/expectations/tests/packed-bitfield.rs
+++ b/bindgen-tests/tests/expectations/tests/packed-bitfield.rs
@@ -217,6 +217,277 @@ where
         }
     }
 }
+/// Const-generic methods for efficient bitfield access when offset and width
+/// are known at compile time.
+impl<const N: usize> __BindgenBitfieldUnit<[u8; N]> {
+    /// Get a field using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
+    #[inline]
+    pub const fn get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&self) -> u64 {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return 0;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = 0usize;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i].reverse_bits() as usize) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i] as usize) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val as u64
+        } else {
+            let mut val = 0u64;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i].reverse_bits() as u64) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    val |= (self.storage[start_byte + i] as u64) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val
+        }
+    }
+    /// Set a field using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
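+    ///
+    /// Bits of `val` above `BIT_WIDTH` are masked off before the store, so
+    /// oversized values are truncated to the field width.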
+    #[inline]
+    pub fn set_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(&mut self, val: u64) {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = val as usize;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift;
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                if cfg!(target_endian = "big") {
+                    let byte = self.storage[start_byte + i].reverse_bits();
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    self.storage[start_byte + i] = new_byte.reverse_bits();
+                } else {
+                    self.storage[start_byte + i] = (self.storage[start_byte + i]
+                        & !byte_mask) | (byte_val & byte_mask);
+                }
+                i += 1;
+            }
+        } else {
+            let mut val = val;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val <<= bit_shift;
+            let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 {
+                !0u64 << bit_shift
+            } else {
+                ((1u64 << BIT_WIDTH) - 1) << bit_shift
+            };
+            let mut i = 0;
+            while i < bytes_needed {
+                let byte_val = (val >> (i * 8)) as u8;
+                let byte_mask = (field_mask >> (i * 8)) as u8;
+                if cfg!(target_endian = "big") {
+                    let byte = self.storage[start_byte + i].reverse_bits();
+                    let new_byte = (byte & !byte_mask) | (byte_val & byte_mask);
+                    self.storage[start_byte + i] = new_byte.reverse_bits();
+                } else {
+                    self.storage[start_byte + i] = (self.storage[start_byte + i]
+                        & !byte_mask) | (byte_val & byte_mask);
+                }
+                i += 1;
+            }
+        }
+    }
+    /// Raw pointer get using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
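The write path in `set_const` above is a masked read-modify-write: only the bytes the field touches are loaded, and within each byte only the bits under `byte_mask` change, so neighbouring fields that share a byte are preserved. A self-contained sketch under the same assumptions (little-endian path, field plus shift fits in 64 bits; `insert_bits` is an illustrative name):

```rust
// Sketch of the read-modify-write store used by set_const.
fn insert_bits(storage: &mut [u8], bit_offset: usize, bit_width: u8, mut val: u64) {
    assert!(bit_width as usize + bit_offset % 8 <= 64);
    if bit_width == 0 {
        return;
    }
    let start_byte = bit_offset / 8;
    let bit_shift = bit_offset % 8;
    let bytes_needed = (bit_width as usize + bit_shift + 7) / 8;
    if bit_width < 64 {
        val &= (1u64 << bit_width) - 1; // truncate the value to the field width
    }
    val <<= bit_shift;
    // Mask selecting exactly the field's bits within the touched bytes.
    let field_mask = if bit_width as usize + bit_shift >= 64 {
        !0u64 << bit_shift
    } else {
        ((1u64 << bit_width) - 1) << bit_shift
    };
    for i in 0..bytes_needed {
        let byte_val = (val >> (i * 8)) as u8;
        let byte_mask = (field_mask >> (i * 8)) as u8;
        let b = &mut storage[start_byte + i];
        *b = (*b & !byte_mask) | (byte_val & byte_mask);
    }
}

fn main() {
    let mut storage = [0xFFu8, 0xFF];
    // Clear a 4-bit field at bit 5; bits outside the field stay set.
    insert_bits(&mut storage, 5, 4, 0b0000);
    assert_eq!(storage, [0b0001_1111, 0b1111_1110]);
    // Write 0b1010 into the same field.
    insert_bits(&mut storage, 5, 4, 0b1010);
    assert_eq!(storage, [0b0101_1111, 0b1111_1111]);
}
```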
+    #[inline]
+    pub const unsafe fn raw_get_const<const BIT_OFFSET: usize, const BIT_WIDTH: u8>(
+        this: *const Self,
+    ) -> u64 {
+        debug_assert!(BIT_WIDTH <= 64);
+        debug_assert!(BIT_OFFSET / 8 < N);
+        debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N);
+        if BIT_WIDTH == 0 {
+            return 0;
+        }
+        let start_byte = BIT_OFFSET / 8;
+        let bit_shift = BIT_OFFSET % 8;
+        let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8;
+        let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 };
+        if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize {
+            let mut val = 0usize;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte.reverse_bits() as usize) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte as usize) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            val &= (1usize << BIT_WIDTH) - 1;
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize);
+            }
+            val as u64
+        } else {
+            let mut val = 0u64;
+            if cfg!(target_endian = "big") {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte.reverse_bits() as u64) << (i * 8);
+                    i += 1;
+                }
+            } else {
+                let mut i = 0;
+                while i < bytes_needed {
+                    let byte = unsafe { *storage_ptr.add(start_byte + i) };
+                    val |= (byte as u64) << (i * 8);
+                    i += 1;
+                }
+            }
+            val >>= bit_shift;
+            if BIT_WIDTH < 64 {
+                val &= (1u64 << BIT_WIDTH) - 1;
+            }
+            if cfg!(target_endian = "big") {
+                val = val.reverse_bits() >> (64 - BIT_WIDTH as usize);
+            }
+            val
+        }
+    }
+    /// Raw pointer set using const generics for compile-time optimization.
+    /// Uses native word size operations when the field fits in usize.
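`raw_get_const` takes `*const Self` rather than `&self` so a field can be read from memory for which creating a Rust reference would be undefined behaviour (for example, a struct concurrently owned by C code). A sketch of that pointer discipline, with illustrative names (`Unit`, `raw_read_byte`):

```rust
// The field address is computed with addr_of!, so no intermediate &Unit or
// &[u8; 2] reference is ever materialized.
use core::ptr::addr_of;

#[repr(C)]
struct Unit {
    storage: [u8; 2],
}

unsafe fn raw_read_byte(this: *const Unit, index: usize) -> u8 {
    let storage_ptr = unsafe { addr_of!((*this).storage) as *const u8 };
    unsafe { *storage_ptr.add(index) }
}

fn main() {
    let unit = Unit { storage: [0xAB, 0xCD] };
    assert_eq!(unsafe { raw_read_byte(&unit, 1) }, 0xCD);
}
```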
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } + } +} #[repr(C, packed)] #[derive(Debug, Default, Copy, Clone)] pub struct Date { @@ -230,13 +501,15 @@ const _: () = { impl Date { #[inline] pub fn day(&self) -> ::std::os::raw::c_uchar { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 5u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 5u8>() as u8) + } } #[inline] pub fn set_day(&mut self, val: ::std::os::raw::c_uchar) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 5u8, val as u64) + self._bitfield_1.set_const::<0usize, 5u8>(val as u64) } } #[inline] @@ -245,7 +518,10 @@ impl Date { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 5u8) as u8, + >>::raw_get_const::< + 0usize, + 5u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -255,23 +531,23 @@ impl Date { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 5u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn month(&self) -> ::std::os::raw::c_uchar { - unsafe { ::std::mem::transmute(self._bitfield_1.get(5usize, 4u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<5usize, 4u8>() as u8) + } } #[inline] pub fn set_month(&mut self, val: 
::std::os::raw::c_uchar) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(5usize, 4u8, val as u64) + self._bitfield_1.set_const::<5usize, 4u8>(val as u64) } } #[inline] @@ -280,7 +556,10 @@ impl Date { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 5usize, 4u8) as u8, + >>::raw_get_const::< + 5usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -290,23 +569,23 @@ impl Date { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 5usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn year(&self) -> ::std::os::raw::c_short { - unsafe { ::std::mem::transmute(self._bitfield_1.get(9usize, 15u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<9usize, 15u8>() as u16) + } } #[inline] pub fn set_year(&mut self, val: ::std::os::raw::c_short) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(9usize, 15u8, val as u64) + self._bitfield_1.set_const::<9usize, 15u8>(val as u64) } } #[inline] @@ -315,8 +594,10 @@ impl Date { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 9usize, 15u8) - as u16, + >>::raw_get_const::< + 9usize, + 15u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -326,12 +607,10 @@ impl Date { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 3usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 9usize, 15u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -342,32 +621,29 @@ impl Date { ) -> __BindgenBitfieldUnit<[u8; 3usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 3usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 5u8, - { - let day: u8 = unsafe { ::std::mem::transmute(day) }; - day as u64 - }, - ); + >({ + let day: u8 = unsafe { ::std::mem::transmute(day) }; + day as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 5usize, 4u8, - { - let month: u8 = unsafe { ::std::mem::transmute(month) }; - month as u64 - }, - ); + >({ + let month: u8 = unsafe { ::std::mem::transmute(month) }; + month as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 9usize, 15u8, - { - let year: u16 = unsafe { ::std::mem::transmute(year) }; - year as u64 - }, - ); + >({ + let year: u16 = unsafe { ::std::mem::transmute(year) }; + year as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/private_fields.rs b/bindgen-tests/tests/expectations/tests/private_fields.rs index f4a34b522c..9139267ad4 100644 --- a/bindgen-tests/tests/expectations/tests/private_fields.rs +++ b/bindgen-tests/tests/expectations/tests/private_fields.rs @@ -217,6 +217,277 @@ where } } } +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
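For a concrete instance, the `year` accessor generated above is `get_const::<9usize, 15u8>()` on a 3-byte unit; the index arithmetic works out as follows:

```rust
// Worked example: a 15-bit field at bit offset 9 touches exactly two bytes.
fn main() {
    let (bit_offset, bit_width) = (9usize, 15usize);
    let start_byte = bit_offset / 8; // 1
    let bit_shift = bit_offset % 8; // 1
    let bytes_needed = (bit_width + bit_shift + 7) / 8; // (15 + 1 + 7) / 8 = 2
    assert_eq!((start_byte, bit_shift, bytes_needed), (1, 1, 2));
    // So the accessor reads storage[1] and storage[2] only; storage[0]
    // (day plus the low bits of month) is never read or written.
}
```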
+ #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | 
(byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
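The `reverse_bits` calls in both branches handle big-endian targets: reversing each storage byte converts its bit numbering into the LSB-first order the shift/mask arithmetic assumes, and the final `val.reverse_bits() >> (64 - BIT_WIDTH)` un-reverses the extracted value within its field width. A small demonstration of that last identity (illustrative values, not generated code):

```rust
fn main() {
    // Per-byte reversal: the MSB becomes bit 0.
    let byte: u8 = 0b1010_0000;
    assert_eq!(byte.reverse_bits(), 0b0000_0101);

    // Reversing a whole u64 and shifting the field down from the top
    // reverses the value within its BIT_WIDTH low bits:
    const BIT_WIDTH: u32 = 3;
    assert_eq!(0b110u64.reverse_bits() >> (64 - BIT_WIDTH), 0b011);
    // Applied to a value the loop produced in width-reversed form,
    // this restores the original bit order.
}
```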
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } + } +} #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct PubPriv { @@ -247,13 +518,15 @@ const _: () = { impl PrivateBitFields { #[inline] fn a(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 4u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 4u8>() as u32) + } } #[inline] fn set_a(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 4u8, val as u64) + self._bitfield_1.set_const::<0usize, 4u8>(val as u64) } } #[inline] @@ -262,8 +535,10 @@ impl PrivateBitFields { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 4u8) - as u32, + >>::raw_get_const::< + 0usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -273,23 +548,23 @@ impl PrivateBitFields { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] fn b(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(4usize, 4u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<4usize, 4u8>() as u32) + } } #[inline] fn set_b(&mut 
self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(4usize, 4u8, val as u64) + self._bitfield_1.set_const::<4usize, 4u8>(val as u64) } } #[inline] @@ -298,8 +573,10 @@ impl PrivateBitFields { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 4usize, 4u8) - as u32, + >>::raw_get_const::< + 4usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -309,12 +586,10 @@ impl PrivateBitFields { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 4usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -324,23 +599,21 @@ impl PrivateBitFields { ) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 4u8, - { - let a: u32 = unsafe { ::std::mem::transmute(a) }; - a as u64 - }, - ); + >({ + let a: u32 = unsafe { ::std::mem::transmute(a) }; + a as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 4usize, 4u8, - { - let b: u32 = unsafe { ::std::mem::transmute(b) }; - b as u64 - }, - ); + >({ + let b: u32 = unsafe { ::std::mem::transmute(b) }; + b as u64 + }); __bindgen_bitfield_unit } } @@ -359,13 +632,15 @@ const _: () = { impl PublicBitFields { #[inline] pub fn a(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 4u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 4u8>() as u32) + } } #[inline] pub fn set_a(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 4u8, val as u64) + self._bitfield_1.set_const::<0usize, 4u8>(val as u64) } } #[inline] @@ -374,8 +649,10 @@ impl PublicBitFields { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 4u8) - as u32, + >>::raw_get_const::< + 0usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -385,23 +662,23 @@ impl PublicBitFields { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(4usize, 4u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<4usize, 4u8>() as u32) + } } #[inline] pub fn set_b(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(4usize, 4u8, val as u64) + self._bitfield_1.set_const::<4usize, 4u8>(val as u64) } } #[inline] @@ -410,8 +687,10 @@ impl PublicBitFields { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 4usize, 4u8) - as u32, + >>::raw_get_const::< + 4usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -421,12 +700,10 @@ impl PublicBitFields { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 4usize, 4u8, - val as u64, - 
) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -436,23 +713,21 @@ impl PublicBitFields { ) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 4u8, - { - let a: u32 = unsafe { ::std::mem::transmute(a) }; - a as u64 - }, - ); + >({ + let a: u32 = unsafe { ::std::mem::transmute(a) }; + a as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 4usize, 4u8, - { - let b: u32 = unsafe { ::std::mem::transmute(b) }; - b as u64 - }, - ); + >({ + let b: u32 = unsafe { ::std::mem::transmute(b) }; + b as u64 + }); __bindgen_bitfield_unit } } @@ -471,13 +746,15 @@ const _: () = { impl MixedBitFields { #[inline] fn a(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 4u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 4u8>() as u32) + } } #[inline] fn set_a(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 4u8, val as u64) + self._bitfield_1.set_const::<0usize, 4u8>(val as u64) } } #[inline] @@ -486,8 +763,10 @@ impl MixedBitFields { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 4u8) - as u32, + >>::raw_get_const::< + 0usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -497,23 +776,23 @@ impl MixedBitFields { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn d(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(4usize, 4u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<4usize, 4u8>() as u32) + } } #[inline] pub fn set_d(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(4usize, 4u8, val as u64) + self._bitfield_1.set_const::<4usize, 4u8>(val as u64) } } #[inline] @@ -522,8 +801,10 @@ impl MixedBitFields { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 4usize, 4u8) - as u32, + >>::raw_get_const::< + 4usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -533,12 +814,10 @@ impl MixedBitFields { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 4usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -548,23 +827,21 @@ impl MixedBitFields { ) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 4u8, - { - let a: u32 = unsafe { ::std::mem::transmute(a) }; - a as u64 - }, - ); + >({ + let a: u32 = unsafe { ::std::mem::transmute(a) }; + a as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 4usize, 4u8, - { - let d: u32 = unsafe { ::std::mem::transmute(d) }; - d as u64 - }, - ); + >({ + let d: u32 = unsafe { ::std::mem::transmute(d) }; + d as u64 + }); __bindgen_bitfield_unit } } @@ -713,13 +990,15 @@ const _: () = { 
impl Override { #[inline] pub fn bf_a(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 4u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 4u8>() as u32) + } } #[inline] pub fn set_bf_a(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 4u8, val as u64) + self._bitfield_1.set_const::<0usize, 4u8>(val as u64) } } #[inline] @@ -728,8 +1007,10 @@ impl Override { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 4u8) - as u32, + >>::raw_get_const::< + 0usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -739,23 +1020,23 @@ impl Override { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] fn bf_b(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(4usize, 4u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<4usize, 4u8>() as u32) + } } #[inline] fn set_bf_b(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(4usize, 4u8, val as u64) + self._bitfield_1.set_const::<4usize, 4u8>(val as u64) } } #[inline] @@ -764,8 +1045,10 @@ impl Override { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 4usize, 4u8) - as u32, + >>::raw_get_const::< + 4usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -775,23 +1058,23 @@ impl Override { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 4usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] fn private_bf_c(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(8usize, 4u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<8usize, 4u8>() as u32) + } } #[inline] fn set_private_bf_c(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(8usize, 4u8, val as u64) + self._bitfield_1.set_const::<8usize, 4u8>(val as u64) } } #[inline] @@ -800,8 +1083,10 @@ impl Override { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 8usize, 4u8) - as u32, + >>::raw_get_const::< + 8usize, + 4u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -811,12 +1096,10 @@ impl Override { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 8usize, 4u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -827,34 +1110,29 @@ impl Override { ) -> __BindgenBitfieldUnit<[u8; 2usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 2usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 4u8, - { - let bf_a: u32 = unsafe { ::std::mem::transmute(bf_a) }; - bf_a as u64 - }, - ); + >({ + let bf_a: u32 = unsafe { 
::std::mem::transmute(bf_a) }; + bf_a as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 4usize, 4u8, - { - let bf_b: u32 = unsafe { ::std::mem::transmute(bf_b) }; - bf_b as u64 - }, - ); + >({ + let bf_b: u32 = unsafe { ::std::mem::transmute(bf_b) }; + bf_b as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 8usize, 4u8, - { - let private_bf_c: u32 = unsafe { - ::std::mem::transmute(private_bf_c) - }; - private_bf_c as u64 - }, - ); + >({ + let private_bf_c: u32 = unsafe { ::std::mem::transmute(private_bf_c) }; + private_bf_c as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/redundant-packed-and-align.rs b/bindgen-tests/tests/expectations/tests/redundant-packed-and-align.rs index e3d8aff547..77f9531f47 100644 --- a/bindgen-tests/tests/expectations/tests/redundant-packed-and-align.rs +++ b/bindgen-tests/tests/expectations/tests/redundant-packed-and-align.rs @@ -217,6 +217,277 @@ where } } } +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
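Because `BIT_OFFSET` and `BIT_WIDTH` are const generic parameters, the native-word-versus-`u64` branch is decided per instantiation at compile time, and the masks derived from it fold to constants in each monomorphized accessor. A sketch of the dispatch predicate (`fits_native_word` is an illustrative name):

```rust
// Evaluated at compile time for every (offset, width) instantiation.
fn fits_native_word<const BIT_OFFSET: usize, const BIT_WIDTH: u8>() -> bool {
    BIT_WIDTH as usize + BIT_OFFSET % 8 <= usize::BITS as usize
}

fn main() {
    // A 4-bit field at offset 0 always takes the native-word path.
    assert!(fits_native_word::<0, 4>());
    // A 64-bit field takes the u64 fallback on 32-bit targets,
    // where usize::BITS == 32.
    println!("64-bit field on native path: {}", fits_native_word::<32, 64>());
}
```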
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } + } +} #[repr(C)] #[repr(align(8))] #[derive(Debug, Default, Copy, Clone)] @@ -263,13 +534,15 @@ const _: () = { impl redundant_packed_bitfield { #[inline] pub fn b0(&self) -> u8 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u8) + } } #[inline] pub fn set_b0(&mut self, val: u8) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -278,7 +551,10 @@ impl redundant_packed_bitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) as u8, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -288,23 +564,23 @@ impl redundant_packed_bitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b1(&self) -> u8 { - unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 1u8>() as u8) + } } #[inline] pub fn set_b1(&mut self, val: u8) { unsafe { 
let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(1usize, 1u8, val as u64) + self._bitfield_1.set_const::<1usize, 1u8>(val as u64) } } #[inline] @@ -313,7 +589,10 @@ impl redundant_packed_bitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 1u8) as u8, + >>::raw_get_const::< + 1usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -323,35 +602,31 @@ impl redundant_packed_bitfield { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn new_bitfield_1(b0: u8, b1: u8) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let b0: u8 = unsafe { ::std::mem::transmute(b0) }; - b0 as u64 - }, - ); + >({ + let b0: u8 = unsafe { ::std::mem::transmute(b0) }; + b0 as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 1u8, - { - let b1: u8 = unsafe { ::std::mem::transmute(b1) }; - b1 as u64 - }, - ); + >({ + let b1: u8 = unsafe { ::std::mem::transmute(b1) }; + b1 as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/struct_with_bitfields.rs b/bindgen-tests/tests/expectations/tests/struct_with_bitfields.rs index 254e8357bd..20c85ccfc9 100644 --- a/bindgen-tests/tests/expectations/tests/struct_with_bitfields.rs +++ b/bindgen-tests/tests/expectations/tests/struct_with_bitfields.rs @@ -217,6 +217,277 @@ where } } } +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | 
(byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
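The two-armed `field_mask` computation in the `u64` branch exists because `(1u64 << BIT_WIDTH) - 1` overflows the shift when `BIT_WIDTH == 64`; once the mask would reach bit 63, the code switches to `!0u64 << bit_shift`, which selects the same bits without the oversized shift. A sketch checking both arms (illustrative free function, not the generated API):

```rust
fn field_mask(bit_width: u32, bit_shift: u32) -> u64 {
    assert!(bit_width + bit_shift <= 64);
    if bit_width + bit_shift >= 64 {
        !0u64 << bit_shift
    } else {
        ((1u64 << bit_width) - 1) << bit_shift
    }
}

fn main() {
    // Guarded arm: agrees with the naive expression computed in u128,
    // where the shift cannot overflow.
    assert_eq!(field_mask(60, 4) as u128, ((1u128 << 60) - 1) << 4);
    // Unguarded arm: width 7 at shift 1 selects bits 1..=7.
    assert_eq!(field_mask(7, 1), 0b1111_1110);
}
```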
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } + } +} #[repr(C)] #[derive(Debug, Default, Copy, Clone, Hash, PartialEq, Eq)] pub struct bitfield { @@ -233,13 +504,15 @@ const _: () = { impl bitfield { #[inline] pub fn a(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u16) + } } #[inline] pub fn set_a(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -248,8 +521,10 @@ impl bitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) - as u16, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -259,23 +534,23 @@ impl bitfield { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<1usize, 1u8>() as u16) + } } #[inline] 
pub fn set_b(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(1usize, 1u8, val as u64) + self._bitfield_1.set_const::<1usize, 1u8>(val as u64) } } #[inline] @@ -284,8 +559,10 @@ impl bitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 1usize, 1u8) - as u16, + >>::raw_get_const::< + 1usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -295,23 +572,23 @@ impl bitfield { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 1usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn c(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<2usize, 1u8>() as u16) + } } #[inline] pub fn set_c(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(2usize, 1u8, val as u64) + self._bitfield_1.set_const::<2usize, 1u8>(val as u64) } } #[inline] @@ -320,8 +597,10 @@ impl bitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 2usize, 1u8) - as u16, + >>::raw_get_const::< + 2usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -331,23 +610,23 @@ impl bitfield { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 2usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn d(&self) -> ::std::os::raw::c_ushort { - unsafe { ::std::mem::transmute(self._bitfield_1.get(6usize, 2u8) as u16) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<6usize, 2u8>() as u16) + } } #[inline] pub fn set_d(&mut self, val: ::std::os::raw::c_ushort) { unsafe { let val: u16 = ::std::mem::transmute(val); - self._bitfield_1.set(6usize, 2u8, val as u64) + self._bitfield_1.set_const::<6usize, 2u8>(val as u64) } } #[inline] @@ -356,8 +635,10 @@ impl bitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 6usize, 2u8) - as u16, + >>::raw_get_const::< + 6usize, + 2u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u16, ) } } @@ -367,12 +648,10 @@ impl bitfield { let val: u16 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 6usize, 2u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -384,52 +663,50 @@ impl bitfield { ) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let a: u16 = unsafe { ::std::mem::transmute(a) }; - a as u64 - }, - ); + >({ + let a: u16 = unsafe { ::std::mem::transmute(a) }; + a as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 1usize, 1u8, - { - let b: u16 = unsafe { ::std::mem::transmute(b) }; - b as u64 - }, - ); + >({ + let b: u16 = unsafe { ::std::mem::transmute(b) }; + b as u64 + }); __bindgen_bitfield_unit - .set( + 
.set_const::< 2usize, 1u8, - { - let c: u16 = unsafe { ::std::mem::transmute(c) }; - c as u64 - }, - ); + >({ + let c: u16 = unsafe { ::std::mem::transmute(c) }; + c as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 6usize, 2u8, - { - let d: u16 = unsafe { ::std::mem::transmute(d) }; - d as u64 - }, - ); + >({ + let d: u16 = unsafe { ::std::mem::transmute(d) }; + d as u64 + }); __bindgen_bitfield_unit } #[inline] pub fn f(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_2.get(0usize, 2u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_2.get_const::<0usize, 2u8>() as u32) + } } #[inline] pub fn set_f(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_2.set(0usize, 2u8, val as u64) + self._bitfield_2.set_const::<0usize, 2u8>(val as u64) } } #[inline] @@ -438,8 +715,10 @@ impl bitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 8usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_2), 0usize, 2u8) - as u32, + >>::raw_get_const::< + 0usize, + 2u8, + >(::std::ptr::addr_of!((*this)._bitfield_2)) as u32, ) } } @@ -449,23 +728,23 @@ impl bitfield { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 8usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_2), + >>::raw_set_const::< 0usize, 2u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_2), val as u64) } } #[inline] pub fn g(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_2.get(32usize, 32u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_2.get_const::<32usize, 32u8>() as u32) + } } #[inline] pub fn set_g(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_2.set(32usize, 32u8, val as u64) + self._bitfield_2.set_const::<32usize, 32u8>(val as u64) } } #[inline] @@ -474,8 +753,10 @@ impl bitfield { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 8usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_2), 32usize, 32u8) - as u32, + >>::raw_get_const::< + 32usize, + 32u8, + >(::std::ptr::addr_of!((*this)._bitfield_2)) as u32, ) } } @@ -485,12 +766,10 @@ impl bitfield { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 8usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_2), + >>::raw_set_const::< 32usize, 32u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_2), val as u64) } } #[inline] @@ -500,23 +779,21 @@ impl bitfield { ) -> __BindgenBitfieldUnit<[u8; 8usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 2u8, - { - let f: u32 = unsafe { ::std::mem::transmute(f) }; - f as u64 - }, - ); + >({ + let f: u32 = unsafe { ::std::mem::transmute(f) }; + f as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 32usize, 32u8, - { - let g: u32 = unsafe { ::std::mem::transmute(g) }; - g as u64 - }, - ); + >({ + let g: u32 = unsafe { ::std::mem::transmute(g) }; + g as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/timex.rs b/bindgen-tests/tests/expectations/tests/timex.rs index 6a097196e7..7e237c67bf 100644 --- a/bindgen-tests/tests/expectations/tests/timex.rs +++ b/bindgen-tests/tests/expectations/tests/timex.rs @@ -217,6 +217,277 @@ where } } } +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. 
+impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | 
(byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
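+    ///
+    /// # Safety
+    ///
+    /// `this` must be non-null, properly aligned, and point to an
+    /// initialized `__BindgenBitfieldUnit` that is valid for writes.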
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } + } +} #[repr(C)] #[derive(Debug, Copy, Clone)] pub struct timex { @@ -264,13 +535,15 @@ impl Default for timex_named { impl timex_named { #[inline] pub fn a(&self) -> ::std::os::raw::c_int { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 32u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 32u8>() as u32) + } } #[inline] pub fn set_a(&mut self, val: ::std::os::raw::c_int) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 32u8, val as u64) + self._bitfield_1.set_const::<0usize, 32u8>(val as u64) } } #[inline] @@ -279,8 +552,10 @@ impl timex_named { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 32u8) - as u32, + >>::raw_get_const::< + 0usize, + 32u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -290,23 +565,23 @@ impl timex_named { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 32u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn b(&self) -> ::std::os::raw::c_int { - unsafe { ::std::mem::transmute(self._bitfield_1.get(32usize, 32u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<32usize, 32u8>() as u32) + } } #[inline] pub 
fn set_b(&mut self, val: ::std::os::raw::c_int) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(32usize, 32u8, val as u64) + self._bitfield_1.set_const::<32usize, 32u8>(val as u64) } } #[inline] @@ -315,8 +590,10 @@ impl timex_named { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 32usize, 32u8) - as u32, + >>::raw_get_const::< + 32usize, + 32u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -326,23 +603,23 @@ impl timex_named { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 32usize, 32u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn c(&self) -> ::std::os::raw::c_int { - unsafe { ::std::mem::transmute(self._bitfield_1.get(64usize, 32u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<64usize, 32u8>() as u32) + } } #[inline] pub fn set_c(&mut self, val: ::std::os::raw::c_int) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(64usize, 32u8, val as u64) + self._bitfield_1.set_const::<64usize, 32u8>(val as u64) } } #[inline] @@ -351,8 +628,10 @@ impl timex_named { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 64usize, 32u8) - as u32, + >>::raw_get_const::< + 64usize, + 32u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -362,23 +641,23 @@ impl timex_named { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 64usize, 32u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn d(&self) -> ::std::os::raw::c_int { - unsafe { ::std::mem::transmute(self._bitfield_1.get(96usize, 32u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<96usize, 32u8>() as u32) + } } #[inline] pub fn set_d(&mut self, val: ::std::os::raw::c_int) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(96usize, 32u8, val as u64) + self._bitfield_1.set_const::<96usize, 32u8>(val as u64) } } #[inline] @@ -387,8 +666,10 @@ impl timex_named { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 96usize, 32u8) - as u32, + >>::raw_get_const::< + 96usize, + 32u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -398,23 +679,23 @@ impl timex_named { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 96usize, 32u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn e(&self) -> ::std::os::raw::c_int { - unsafe { ::std::mem::transmute(self._bitfield_1.get(128usize, 32u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<128usize, 32u8>() as u32) + } } #[inline] pub fn set_e(&mut self, val: ::std::os::raw::c_int) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(128usize, 32u8, val as u64) + self._bitfield_1.set_const::<128usize, 32u8>(val as u64) } } #[inline] @@ -423,8 +704,10 @@ impl timex_named { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 44usize], - 
>>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 128usize, 32u8) - as u32, + >>::raw_get_const::< + 128usize, + 32u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -434,23 +717,23 @@ impl timex_named { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 128usize, 32u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn f(&self) -> ::std::os::raw::c_int { - unsafe { ::std::mem::transmute(self._bitfield_1.get(160usize, 32u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<160usize, 32u8>() as u32) + } } #[inline] pub fn set_f(&mut self, val: ::std::os::raw::c_int) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(160usize, 32u8, val as u64) + self._bitfield_1.set_const::<160usize, 32u8>(val as u64) } } #[inline] @@ -459,8 +742,10 @@ impl timex_named { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 160usize, 32u8) - as u32, + >>::raw_get_const::< + 160usize, + 32u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -470,23 +755,23 @@ impl timex_named { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 160usize, 32u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn g(&self) -> ::std::os::raw::c_int { - unsafe { ::std::mem::transmute(self._bitfield_1.get(192usize, 32u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<192usize, 32u8>() as u32) + } } #[inline] pub fn set_g(&mut self, val: ::std::os::raw::c_int) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(192usize, 32u8, val as u64) + self._bitfield_1.set_const::<192usize, 32u8>(val as u64) } } #[inline] @@ -495,8 +780,10 @@ impl timex_named { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 192usize, 32u8) - as u32, + >>::raw_get_const::< + 192usize, + 32u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -506,23 +793,23 @@ impl timex_named { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 192usize, 32u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn h(&self) -> ::std::os::raw::c_int { - unsafe { ::std::mem::transmute(self._bitfield_1.get(224usize, 32u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<224usize, 32u8>() as u32) + } } #[inline] pub fn set_h(&mut self, val: ::std::os::raw::c_int) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(224usize, 32u8, val as u64) + self._bitfield_1.set_const::<224usize, 32u8>(val as u64) } } #[inline] @@ -531,8 +818,10 @@ impl timex_named { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 224usize, 32u8) - as u32, + >>::raw_get_const::< + 224usize, + 32u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -542,23 +831,23 @@ impl timex_named { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_set( - 
::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 224usize, 32u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn i(&self) -> ::std::os::raw::c_int { - unsafe { ::std::mem::transmute(self._bitfield_1.get(256usize, 32u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<256usize, 32u8>() as u32) + } } #[inline] pub fn set_i(&mut self, val: ::std::os::raw::c_int) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(256usize, 32u8, val as u64) + self._bitfield_1.set_const::<256usize, 32u8>(val as u64) } } #[inline] @@ -567,8 +856,10 @@ impl timex_named { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 256usize, 32u8) - as u32, + >>::raw_get_const::< + 256usize, + 32u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -578,23 +869,23 @@ impl timex_named { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 256usize, 32u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn j(&self) -> ::std::os::raw::c_int { - unsafe { ::std::mem::transmute(self._bitfield_1.get(288usize, 32u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<288usize, 32u8>() as u32) + } } #[inline] pub fn set_j(&mut self, val: ::std::os::raw::c_int) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(288usize, 32u8, val as u64) + self._bitfield_1.set_const::<288usize, 32u8>(val as u64) } } #[inline] @@ -603,8 +894,10 @@ impl timex_named { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 288usize, 32u8) - as u32, + >>::raw_get_const::< + 288usize, + 32u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -614,23 +907,23 @@ impl timex_named { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 288usize, 32u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn k(&self) -> ::std::os::raw::c_int { - unsafe { ::std::mem::transmute(self._bitfield_1.get(320usize, 32u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<320usize, 32u8>() as u32) + } } #[inline] pub fn set_k(&mut self, val: ::std::os::raw::c_int) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(320usize, 32u8, val as u64) + self._bitfield_1.set_const::<320usize, 32u8>(val as u64) } } #[inline] @@ -639,8 +932,10 @@ impl timex_named { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 320usize, 32u8) - as u32, + >>::raw_get_const::< + 320usize, + 32u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -650,12 +945,10 @@ impl timex_named { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 44usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 320usize, 32u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } } diff --git a/bindgen-tests/tests/expectations/tests/union_bitfield.rs b/bindgen-tests/tests/expectations/tests/union_bitfield.rs index 465b87c0ce..d9af24c920 100644 --- 
a/bindgen-tests/tests/expectations/tests/union_bitfield.rs +++ b/bindgen-tests/tests/expectations/tests/union_bitfield.rs @@ -217,6 +217,277 @@ where } } } +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
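+    /// For example, on a little-endian target `unit.set_const::<4, 4>(v)`
+    /// rewrites only the high nibble of the first byte; the constant field
+    /// mask leaves the remaining bits untouched.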
+ #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } + } +} #[repr(C)] #[derive(Copy, Clone)] pub union U4 { @@ -240,13 +511,15 @@ impl Default for U4 { impl U4 { #[inline] pub fn derp(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u32) + } } #[inline] pub fn set_derp(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -255,8 +528,10 @@ impl U4 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) - as u32, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -266,12 +541,10 @@ impl U4 { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -280,14 +553,13 @@ impl U4 { ) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let derp: u32 = unsafe { 
::std::mem::transmute(derp) }; - derp as u64 - }, - ); + >({ + let derp: u32 = unsafe { ::std::mem::transmute(derp) }; + derp as u64 + }); __bindgen_bitfield_unit } } @@ -314,13 +586,15 @@ impl Default for B { impl B { #[inline] pub fn foo(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 31u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 31u8>() as u32) + } } #[inline] pub fn set_foo(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 31u8, val as u64) + self._bitfield_1.set_const::<0usize, 31u8>(val as u64) } } #[inline] @@ -329,8 +603,10 @@ impl B { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 31u8) - as u32, + >>::raw_get_const::< + 0usize, + 31u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -340,23 +616,23 @@ impl B { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 31u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bar(&self) -> ::std::os::raw::c_uchar { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 1u8>() as u8) + } } #[inline] pub fn set_bar(&mut self, val: ::std::os::raw::c_uchar) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) + self._bitfield_1.set_const::<0usize, 1u8>(val as u64) } } #[inline] @@ -365,7 +641,10 @@ impl B { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 1u8) as u8, + >>::raw_get_const::< + 0usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u8, ) } } @@ -375,12 +654,10 @@ impl B { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 1usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -390,23 +667,21 @@ impl B { ) -> __BindgenBitfieldUnit<[u8; 1usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 31u8, - { - let foo: u32 = unsafe { ::std::mem::transmute(foo) }; - foo as u64 - }, - ); + >({ + let foo: u32 = unsafe { ::std::mem::transmute(foo) }; + foo as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 1u8, - { - let bar: u8 = unsafe { ::std::mem::transmute(bar) }; - bar as u64 - }, - ); + >({ + let bar: u8 = unsafe { ::std::mem::transmute(bar) }; + bar as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/union_with_anon_struct_bitfield.rs b/bindgen-tests/tests/expectations/tests/union_with_anon_struct_bitfield.rs index d0afed9b46..b757df6f4a 100644 --- a/bindgen-tests/tests/expectations/tests/union_with_anon_struct_bitfield.rs +++ b/bindgen-tests/tests/expectations/tests/union_with_anon_struct_bitfield.rs @@ -217,6 +217,277 @@ where } } } +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. 
+ /// Uses native word size operations when the field fits in usize. + #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + 
self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } + } +} #[repr(C)] #[derive(Copy, Clone)] pub union foo { @@ -239,13 +510,15 @@ const _: () = { impl foo__bindgen_ty_1 { #[inline] pub fn b(&self) -> ::std::os::raw::c_int { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 7u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 7u8>() as u32) + } } #[inline] pub fn set_b(&mut self, val: ::std::os::raw::c_int) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 7u8, val as u64) + self._bitfield_1.set_const::<0usize, 7u8>(val as u64) } } #[inline] @@ -254,8 +527,10 @@ impl foo__bindgen_ty_1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 7u8) - as u32, + >>::raw_get_const::< + 0usize, + 7u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -265,23 +540,23 @@ impl foo__bindgen_ty_1 { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 7u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn c(&self) -> ::std::os::raw::c_int { - unsafe { ::std::mem::transmute(self._bitfield_1.get(7usize, 25u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<7usize, 25u8>() as u32) + } } #[inline] pub fn set_c(&mut 
self, val: ::std::os::raw::c_int) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(7usize, 25u8, val as u64) + self._bitfield_1.set_const::<7usize, 25u8>(val as u64) } } #[inline] @@ -290,8 +565,10 @@ impl foo__bindgen_ty_1 { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 7usize, 25u8) - as u32, + >>::raw_get_const::< + 7usize, + 25u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -301,12 +578,10 @@ impl foo__bindgen_ty_1 { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 7usize, 25u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -316,23 +591,21 @@ impl foo__bindgen_ty_1 { ) -> __BindgenBitfieldUnit<[u8; 4usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 7u8, - { - let b: u32 = unsafe { ::std::mem::transmute(b) }; - b as u64 - }, - ); + >({ + let b: u32 = unsafe { ::std::mem::transmute(b) }; + b as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 7usize, 25u8, - { - let c: u32 = unsafe { ::std::mem::transmute(c) }; - c as u64 - }, - ); + >({ + let c: u32 = unsafe { ::std::mem::transmute(c) }; + c as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen-tests/tests/expectations/tests/weird_bitfields.rs b/bindgen-tests/tests/expectations/tests/weird_bitfields.rs index e4c988c9a0..7a9269bbef 100644 --- a/bindgen-tests/tests/expectations/tests/weird_bitfields.rs +++ b/bindgen-tests/tests/expectations/tests/weird_bitfields.rs @@ -217,6 +217,277 @@ where } } } +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
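+    /// On big-endian targets each byte is bit-reversed on load and the
+    /// final value is bit-reversed again at the end, so the same
+    /// little-endian shift/mask arithmetic works for both byte orders.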
+ #[inline] + pub const fn get_const(&self) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as usize) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val + |= (self.storage[start_byte + i].reverse_bits() as u64) + << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const(&mut self, val: u64) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = (self.storage[start_byte + i] + & !byte_mask) | 
(byte_val & byte_mask); + } + i += 1; + } + } + } + /// Raw pointer get using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return 0; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val as u64 + } else { + let mut val = 0u64; + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + val >>= bit_shift; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val + } + } + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. 
+ #[inline] + pub unsafe fn raw_set_const( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + if BIT_WIDTH == 0 { + return; + } + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + let storage_ptr = this.cast::<[u8; N]>().cast::(); + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (usize::BITS as usize - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + val <<= bit_shift; + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } + } +} #[repr(u32)] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub enum nsStyleSVGOpacitySource { @@ -296,13 +567,15 @@ impl Default for Weird { impl Weird { #[inline] pub fn bitTest(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 16u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<0usize, 16u8>() as u32) + } } #[inline] pub fn set_bitTest(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 16u8, val as u64) + self._bitfield_1.set_const::<0usize, 16u8>(val as u64) } } #[inline] @@ -311,8 +584,10 @@ impl Weird { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 0usize, 16u8) - as u32, + >>::raw_get_const::< + 0usize, + 16u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -322,23 +597,23 @@ impl Weird { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 0usize, 16u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] pub fn bitTest2(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(16usize, 15u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_1.get_const::<16usize, 
15u8>() as u32) + } } #[inline] pub fn set_bitTest2(&mut self, val: ::std::os::raw::c_uint) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(16usize, 15u8, val as u64) + self._bitfield_1.set_const::<16usize, 15u8>(val as u64) } } #[inline] @@ -347,8 +622,10 @@ impl Weird { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_1), 16usize, 15u8) - as u32, + >>::raw_get_const::< + 16usize, + 15u8, + >(::std::ptr::addr_of!((*this)._bitfield_1)) as u32, ) } } @@ -358,12 +635,10 @@ impl Weird { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 4usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_1), + >>::raw_set_const::< 16usize, 15u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_1), val as u64) } } #[inline] @@ -373,34 +648,34 @@ impl Weird { ) -> __BindgenBitfieldUnit<[u8; 4usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 16u8, - { - let bitTest: u32 = unsafe { ::std::mem::transmute(bitTest) }; - bitTest as u64 - }, - ); + >({ + let bitTest: u32 = unsafe { ::std::mem::transmute(bitTest) }; + bitTest as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 16usize, 15u8, - { - let bitTest2: u32 = unsafe { ::std::mem::transmute(bitTest2) }; - bitTest2 as u64 - }, - ); + >({ + let bitTest2: u32 = unsafe { ::std::mem::transmute(bitTest2) }; + bitTest2 as u64 + }); __bindgen_bitfield_unit } #[inline] pub fn mFillOpacitySource(&self) -> nsStyleSVGOpacitySource { - unsafe { ::std::mem::transmute(self._bitfield_2.get(0usize, 3u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_2.get_const::<0usize, 3u8>() as u32) + } } #[inline] pub fn set_mFillOpacitySource(&mut self, val: nsStyleSVGOpacitySource) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_2.set(0usize, 3u8, val as u64) + self._bitfield_2.set_const::<0usize, 3u8>(val as u64) } } #[inline] @@ -409,8 +684,10 @@ impl Weird { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_2), 0usize, 3u8) - as u32, + >>::raw_get_const::< + 0usize, + 3u8, + >(::std::ptr::addr_of!((*this)._bitfield_2)) as u32, ) } } @@ -423,23 +700,23 @@ impl Weird { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_2), + >>::raw_set_const::< 0usize, 3u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_2), val as u64) } } #[inline] pub fn mStrokeOpacitySource(&self) -> nsStyleSVGOpacitySource { - unsafe { ::std::mem::transmute(self._bitfield_2.get(3usize, 3u8) as u32) } + unsafe { + ::std::mem::transmute(self._bitfield_2.get_const::<3usize, 3u8>() as u32) + } } #[inline] pub fn set_mStrokeOpacitySource(&mut self, val: nsStyleSVGOpacitySource) { unsafe { let val: u32 = ::std::mem::transmute(val); - self._bitfield_2.set(3usize, 3u8, val as u64) + self._bitfield_2.set_const::<3usize, 3u8>(val as u64) } } #[inline] @@ -450,8 +727,10 @@ impl Weird { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_2), 3usize, 3u8) - as u32, + >>::raw_get_const::< + 3usize, + 3u8, + >(::std::ptr::addr_of!((*this)._bitfield_2)) as u32, ) } } @@ -464,23 +743,23 @@ impl Weird { let val: u32 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - 
::std::ptr::addr_of_mut!((*this)._bitfield_2), + >>::raw_set_const::< 3usize, 3u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_2), val as u64) } } #[inline] pub fn mStrokeDasharrayFromObject(&self) -> bool { - unsafe { ::std::mem::transmute(self._bitfield_2.get(6usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_2.get_const::<6usize, 1u8>() as u8) + } } #[inline] pub fn set_mStrokeDasharrayFromObject(&mut self, val: bool) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_2.set(6usize, 1u8, val as u64) + self._bitfield_2.set_const::<6usize, 1u8>(val as u64) } } #[inline] @@ -489,7 +768,10 @@ impl Weird { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_2), 6usize, 1u8) as u8, + >>::raw_get_const::< + 6usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_2)) as u8, ) } } @@ -499,23 +781,23 @@ impl Weird { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_2), + >>::raw_set_const::< 6usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_2), val as u64) } } #[inline] pub fn mStrokeDashoffsetFromObject(&self) -> bool { - unsafe { ::std::mem::transmute(self._bitfield_2.get(7usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_2.get_const::<7usize, 1u8>() as u8) + } } #[inline] pub fn set_mStrokeDashoffsetFromObject(&mut self, val: bool) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_2.set(7usize, 1u8, val as u64) + self._bitfield_2.set_const::<7usize, 1u8>(val as u64) } } #[inline] @@ -524,7 +806,10 @@ impl Weird { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_2), 7usize, 1u8) as u8, + >>::raw_get_const::< + 7usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_2)) as u8, ) } } @@ -534,23 +819,23 @@ impl Weird { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_2), + >>::raw_set_const::< 7usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_2), val as u64) } } #[inline] pub fn mStrokeWidthFromObject(&self) -> bool { - unsafe { ::std::mem::transmute(self._bitfield_2.get(8usize, 1u8) as u8) } + unsafe { + ::std::mem::transmute(self._bitfield_2.get_const::<8usize, 1u8>() as u8) + } } #[inline] pub fn set_mStrokeWidthFromObject(&mut self, val: bool) { unsafe { let val: u8 = ::std::mem::transmute(val); - self._bitfield_2.set(8usize, 1u8, val as u64) + self._bitfield_2.set_const::<8usize, 1u8>(val as u64) } } #[inline] @@ -559,7 +844,10 @@ impl Weird { ::std::mem::transmute( <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_get(::std::ptr::addr_of!((*this)._bitfield_2), 8usize, 1u8) as u8, + >>::raw_get_const::< + 8usize, + 1u8, + >(::std::ptr::addr_of!((*this)._bitfield_2)) as u8, ) } } @@ -569,12 +857,10 @@ impl Weird { let val: u8 = ::std::mem::transmute(val); <__BindgenBitfieldUnit< [u8; 2usize], - >>::raw_set( - ::std::ptr::addr_of_mut!((*this)._bitfield_2), + >>::raw_set_const::< 8usize, 1u8, - val as u64, - ) + >(::std::ptr::addr_of_mut!((*this)._bitfield_2), val as u64) } } #[inline] @@ -587,60 +873,55 @@ impl Weird { ) -> __BindgenBitfieldUnit<[u8; 2usize]> { let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 2usize]> = Default::default(); __bindgen_bitfield_unit - .set( + .set_const::< 0usize, 3u8, - { - let 
mFillOpacitySource: u32 = unsafe { - ::std::mem::transmute(mFillOpacitySource) - }; - mFillOpacitySource as u64 - }, - ); + >({ + let mFillOpacitySource: u32 = unsafe { + ::std::mem::transmute(mFillOpacitySource) + }; + mFillOpacitySource as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 3usize, 3u8, - { - let mStrokeOpacitySource: u32 = unsafe { - ::std::mem::transmute(mStrokeOpacitySource) - }; - mStrokeOpacitySource as u64 - }, - ); + >({ + let mStrokeOpacitySource: u32 = unsafe { + ::std::mem::transmute(mStrokeOpacitySource) + }; + mStrokeOpacitySource as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 6usize, 1u8, - { - let mStrokeDasharrayFromObject: u8 = unsafe { - ::std::mem::transmute(mStrokeDasharrayFromObject) - }; - mStrokeDasharrayFromObject as u64 - }, - ); + >({ + let mStrokeDasharrayFromObject: u8 = unsafe { + ::std::mem::transmute(mStrokeDasharrayFromObject) + }; + mStrokeDasharrayFromObject as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 7usize, 1u8, - { - let mStrokeDashoffsetFromObject: u8 = unsafe { - ::std::mem::transmute(mStrokeDashoffsetFromObject) - }; - mStrokeDashoffsetFromObject as u64 - }, - ); + >({ + let mStrokeDashoffsetFromObject: u8 = unsafe { + ::std::mem::transmute(mStrokeDashoffsetFromObject) + }; + mStrokeDashoffsetFromObject as u64 + }); __bindgen_bitfield_unit - .set( + .set_const::< 8usize, 1u8, - { - let mStrokeWidthFromObject: u8 = unsafe { - ::std::mem::transmute(mStrokeWidthFromObject) - }; - mStrokeWidthFromObject as u64 - }, - ); + >({ + let mStrokeWidthFromObject: u8 = unsafe { + ::std::mem::transmute(mStrokeWidthFromObject) + }; + mStrokeWidthFromObject as u64 + }); __bindgen_bitfield_unit } } diff --git a/bindgen/codegen/bitfield_unit.rs b/bindgen/codegen/bitfield_unit.rs index c5ac6637ff..3ca5a8d6db 100644 --- a/bindgen/codegen/bitfield_unit.rs +++ b/bindgen/codegen/bitfield_unit.rs @@ -111,7 +111,8 @@ where if cfg!(target_endian = "big") { for i in 0..bytes_needed { - val |= (storage[start_byte + i].reverse_bits() as u64) << (i * 8); + val |= + (storage[start_byte + i].reverse_bits() as u64) << (i * 8); } } else { for i in 0..bytes_needed { @@ -233,8 +234,9 @@ where let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); storage[start_byte + i] = new_byte.reverse_bits(); } else { - storage[start_byte + i] = - (storage[start_byte + i] & !byte_mask) | (byte_val & byte_mask); + storage[start_byte + i] = (storage[start_byte + i] & + !byte_mask) | + (byte_val & byte_mask); } } } @@ -293,9 +295,365 @@ where unsafe { *byte_ptr = new_byte.reverse_bits() }; } else { unsafe { - *byte_ptr = (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + *byte_ptr = + (*byte_ptr & !byte_mask) | (byte_val & byte_mask) }; } } } } + +/// Const-generic methods for efficient bitfield access when offset and width +/// are known at compile time. +impl __BindgenBitfieldUnit<[u8; N]> { + /// Get a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub const fn get_const( + &self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + + if BIT_WIDTH == 0 { + return 0; + } + + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + + // Use usize for fields that fit, u64 only when necessary. + // The compiler eliminates the unused branch since BIT_WIDTH is const. 
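+        // Worked example (illustrative values only, not taken from any
+        // generated binding): BIT_OFFSET = 11, BIT_WIDTH = 5 gives
+        // start_byte = 1, bit_shift = 3, and
+        // bytes_needed = (5 + 3 + 7) / 8 = 1, so on a little-endian target
+        // the field is extracted by loading storage[1], shifting it right
+        // by 3, and masking with (1 << 5) - 1 = 0b11111.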
+ if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i].reverse_bits() + as usize) << + (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as usize) << (i * 8); + i += 1; + } + } + + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> + (usize::BITS as usize - BIT_WIDTH as usize); + } + + val as u64 + } else { + let mut val = 0u64; + + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i].reverse_bits() as u64) << + (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + val |= (self.storage[start_byte + i] as u64) << (i * 8); + i += 1; + } + } + + val >>= bit_shift; + + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + + val + } + } + + /// Set a field using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub fn set_const( + &mut self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + + if BIT_WIDTH == 0 { + return; + } + + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + + // Use usize for fields that fit, u64 only when necessary. + // The compiler eliminates the unused branch since BIT_WIDTH is const. + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> + (usize::BITS as usize - BIT_WIDTH as usize); + } + + val <<= bit_shift; + + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = + (self.storage[start_byte + i] & !byte_mask) | + (byte_val & byte_mask); + } + i += 1; + } + } else { + let mut val = val; + + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + + val <<= bit_shift; + + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + + if cfg!(target_endian = "big") { + let byte = self.storage[start_byte + i].reverse_bits(); + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + self.storage[start_byte + i] = new_byte.reverse_bits(); + } else { + self.storage[start_byte + i] = + (self.storage[start_byte + i] & !byte_mask) | + (byte_val & byte_mask); + } + i += 1; + } + } + } + + /// Raw pointer get using const generics for compile-time optimization. 
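+    /// The extraction logic mirrors `get_const`; the storage bytes are read
+    /// through a raw pointer instead of `&self`.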
+ /// Uses native word size operations when the field fits in usize. + #[inline] + pub const unsafe fn raw_get_const< + const BIT_OFFSET: usize, + const BIT_WIDTH: u8, + >( + this: *const Self, + ) -> u64 { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + + if BIT_WIDTH == 0 { + return 0; + } + + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + + let storage_ptr = + unsafe { core::ptr::addr_of!((*this).storage) as *const u8 }; + + // Use usize for fields that fit, u64 only when necessary. + if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = 0usize; + + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as usize) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as usize) << (i * 8); + i += 1; + } + } + + val >>= bit_shift; + val &= (1usize << BIT_WIDTH) - 1; + + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> + (usize::BITS as usize - BIT_WIDTH as usize); + } + + val as u64 + } else { + let mut val = 0u64; + + if cfg!(target_endian = "big") { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte.reverse_bits() as u64) << (i * 8); + i += 1; + } + } else { + let mut i = 0; + while i < bytes_needed { + let byte = unsafe { *storage_ptr.add(start_byte + i) }; + val |= (byte as u64) << (i * 8); + i += 1; + } + } + + val >>= bit_shift; + + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + + val + } + } + + /// Raw pointer set using const generics for compile-time optimization. + /// Uses native word size operations when the field fits in usize. + #[inline] + pub unsafe fn raw_set_const< + const BIT_OFFSET: usize, + const BIT_WIDTH: u8, + >( + this: *mut Self, + val: u64, + ) { + debug_assert!(BIT_WIDTH <= 64); + debug_assert!(BIT_OFFSET / 8 < N); + debug_assert!((BIT_OFFSET + (BIT_WIDTH as usize)) / 8 <= N); + + if BIT_WIDTH == 0 { + return; + } + + let start_byte = BIT_OFFSET / 8; + let bit_shift = BIT_OFFSET % 8; + let bytes_needed = (BIT_WIDTH as usize + bit_shift + 7) / 8; + + // Cast through pointer types instead of using addr_of_mut! for const compatibility + let storage_ptr = this.cast::<[u8; N]>().cast::(); + + // Use usize for fields that fit, u64 only when necessary. 
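+        // Worked example (illustrative values only, not taken from any
+        // generated binding): BIT_OFFSET = 3, BIT_WIDTH = 5 gives
+        // bit_shift = 3 and field_mask = 0b1111_1000. On a little-endian
+        // target each affected byte is updated with a read-modify-write,
+        // (old & !mask) | (new & mask), so neighbouring fields that share
+        // the byte are preserved; the big-endian path first reverses the
+        // bits within each byte.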
+ if BIT_WIDTH as usize + bit_shift <= usize::BITS as usize { + let mut val = val as usize; + val &= (1usize << BIT_WIDTH) - 1; + + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> + (usize::BITS as usize - BIT_WIDTH as usize); + } + + val <<= bit_shift; + + let field_mask = ((1usize << BIT_WIDTH) - 1) << bit_shift; + + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = + (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } else { + let mut val = val; + + if BIT_WIDTH < 64 { + val &= (1u64 << BIT_WIDTH) - 1; + } + + if cfg!(target_endian = "big") { + val = val.reverse_bits() >> (64 - BIT_WIDTH as usize); + } + + val <<= bit_shift; + + let field_mask = if BIT_WIDTH as usize + bit_shift >= 64 { + !0u64 << bit_shift + } else { + ((1u64 << BIT_WIDTH) - 1) << bit_shift + }; + + let mut i = 0; + while i < bytes_needed { + let byte_val = (val >> (i * 8)) as u8; + let byte_mask = (field_mask >> (i * 8)) as u8; + let byte_ptr = unsafe { storage_ptr.add(start_byte + i) }; + + if cfg!(target_endian = "big") { + let byte = unsafe { (*byte_ptr).reverse_bits() }; + let new_byte = (byte & !byte_mask) | (byte_val & byte_mask); + unsafe { *byte_ptr = new_byte.reverse_bits() }; + } else { + unsafe { + *byte_ptr = + (*byte_ptr & !byte_mask) | (byte_val & byte_mask) + }; + } + i += 1; + } + } + } +} diff --git a/bindgen/codegen/bitfield_unit_tests.rs b/bindgen/codegen/bitfield_unit_tests.rs index ead0ffec0c..8c9ffdb0fa 100644 --- a/bindgen/codegen/bitfield_unit_tests.rs +++ b/bindgen/codegen/bitfield_unit_tests.rs @@ -258,3 +258,112 @@ bitfield_unit_set! 
{ set(7, 16, 0b1111111111111111) is 0b00000000011111111111111110000000; set(8, 16, 0b1111111111111111) is 0b00000000111111111111111100000000; } + +// Tests for const-generic methods +#[test] +fn bitfield_unit_get_const_matches_get() { + // Test that get_const produces same results as get + let unit = __BindgenBitfieldUnit::<[u8; 4]>::new([ + 0b01010101, 0b11111111, 0b00000000, 0b11111111, + ]); + + // Single byte tests + assert_eq!(unit.get_const::<0, 1>(), unit.get(0, 1)); + assert_eq!(unit.get_const::<1, 1>(), unit.get(1, 1)); + assert_eq!(unit.get_const::<0, 8>(), unit.get(0, 8)); + assert_eq!(unit.get_const::<3, 5>(), unit.get(3, 5)); + + // Cross-byte boundary tests + assert_eq!(unit.get_const::<0, 16>(), unit.get(0, 16)); + assert_eq!(unit.get_const::<4, 16>(), unit.get(4, 16)); + assert_eq!(unit.get_const::<7, 16>(), unit.get(7, 16)); + assert_eq!(unit.get_const::<8, 16>(), unit.get(8, 16)); + + // Large field + assert_eq!(unit.get_const::<0, 32>(), unit.get(0, 32)); +} + +#[test] +fn bitfield_unit_set_const_matches_set() { + // Test that set_const produces same results as set + let test_value = 0b101010101010; + + for offset in [0, 1, 3, 7, 8, 12] { + for width in [1, 2, 5, 8, 12] { + let mut unit_const = __BindgenBitfieldUnit::<[u8; 4]>::new([0; 4]); + let mut unit_runtime = + __BindgenBitfieldUnit::<[u8; 4]>::new([0; 4]); + + match (offset, width) { + (0, 1) => unit_const.set_const::<0, 1>(test_value), + (0, 2) => unit_const.set_const::<0, 2>(test_value), + (0, 5) => unit_const.set_const::<0, 5>(test_value), + (0, 8) => unit_const.set_const::<0, 8>(test_value), + (0, 12) => unit_const.set_const::<0, 12>(test_value), + (1, 1) => unit_const.set_const::<1, 1>(test_value), + (1, 2) => unit_const.set_const::<1, 2>(test_value), + (1, 5) => unit_const.set_const::<1, 5>(test_value), + (1, 8) => unit_const.set_const::<1, 8>(test_value), + (1, 12) => unit_const.set_const::<1, 12>(test_value), + (3, 1) => unit_const.set_const::<3, 1>(test_value), + (3, 2) => unit_const.set_const::<3, 2>(test_value), + (3, 5) => unit_const.set_const::<3, 5>(test_value), + (3, 8) => unit_const.set_const::<3, 8>(test_value), + (3, 12) => unit_const.set_const::<3, 12>(test_value), + (7, 1) => unit_const.set_const::<7, 1>(test_value), + (7, 2) => unit_const.set_const::<7, 2>(test_value), + (7, 5) => unit_const.set_const::<7, 5>(test_value), + (7, 8) => unit_const.set_const::<7, 8>(test_value), + (7, 12) => unit_const.set_const::<7, 12>(test_value), + (8, 1) => unit_const.set_const::<8, 1>(test_value), + (8, 2) => unit_const.set_const::<8, 2>(test_value), + (8, 5) => unit_const.set_const::<8, 5>(test_value), + (8, 8) => unit_const.set_const::<8, 8>(test_value), + (8, 12) => unit_const.set_const::<8, 12>(test_value), + (12, 1) => unit_const.set_const::<12, 1>(test_value), + (12, 2) => unit_const.set_const::<12, 2>(test_value), + (12, 5) => unit_const.set_const::<12, 5>(test_value), + (12, 8) => unit_const.set_const::<12, 8>(test_value), + (12, 12) => unit_const.set_const::<12, 12>(test_value), + _ => continue, + } + + unit_runtime.set(offset, width, test_value); + // Compare by reading back the full value + assert_eq!(unit_const.get(0, 32), unit_runtime.get(0, 32)); + } + } +} + +#[test] +fn bitfield_unit_raw_const_methods() { + let unit = __BindgenBitfieldUnit::<[u8; 2]>::new([0b10011101, 0b00011101]); + + // Test raw_get_const + unsafe { + assert_eq!( + __BindgenBitfieldUnit::raw_get_const::<0, 8>(&unit), + unit.get(0, 8) + ); + assert_eq!( + __BindgenBitfieldUnit::raw_get_const::<4, 8>(&unit), + unit.get(4, 8) 
+ ); + assert_eq!( + __BindgenBitfieldUnit::raw_get_const::<0, 16>(&unit), + unit.get(0, 16) + ); + } + + // Test raw_set_const + let mut unit_const = __BindgenBitfieldUnit::<[u8; 2]>::new([0; 2]); + let mut unit_runtime = __BindgenBitfieldUnit::<[u8; 2]>::new([0; 2]); + + unsafe { + __BindgenBitfieldUnit::raw_set_const::<3, 5>(&mut unit_const, 0b11111); + } + unit_runtime.set(3, 5, 0b11111); + + // Compare by reading back + assert_eq!(unit_const.get(0, 16), unit_runtime.get(0, 16)); +} diff --git a/bindgen/codegen/mod.rs b/bindgen/codegen/mod.rs index a5aa73b5d8..75801ad117 100644 --- a/bindgen/codegen/mod.rs +++ b/bindgen/codegen/mod.rs @@ -1786,9 +1786,7 @@ impl Bitfield { let prefix = ctx.trait_prefix(); ctor_impl.append_all(quote! { - __bindgen_bitfield_unit.set( - #offset, - #width, + __bindgen_bitfield_unit.set_const::<#offset, #width>( { let #param_name: #bitfield_int_ty = unsafe { ::#prefix::mem::transmute(#param_name) @@ -2149,7 +2147,7 @@ impl<'a> FieldCodegen<'a> for Bitfield { #access_spec fn #getter_name(&self) -> #bitfield_ty { unsafe { ::#prefix::mem::transmute( - self.#unit_field_ident.get(#offset, #width) + self.#unit_field_ident.get_const::<#offset, #width>() as #bitfield_int_ty ) } @@ -2159,9 +2157,7 @@ impl<'a> FieldCodegen<'a> for Bitfield { #access_spec fn #setter_name(&mut self, val: #bitfield_ty) { unsafe { let val: #bitfield_int_ty = ::#prefix::mem::transmute(val); - self.#unit_field_ident.set( - #offset, - #width, + self.#unit_field_ident.set_const::<#offset, #width>( val as u64 ) } @@ -2172,10 +2168,8 @@ impl<'a> FieldCodegen<'a> for Bitfield { #[inline] #access_spec unsafe fn #raw_getter_name(this: *const Self) -> #bitfield_ty { unsafe { - ::#prefix::mem::transmute(<#unit_field_ty>::raw_get( + ::#prefix::mem::transmute(<#unit_field_ty>::raw_get_const::<#offset, #width>( ::#prefix::ptr::addr_of!((*this).#unit_field_ident), - #offset, - #width, ) as #bitfield_int_ty) } } @@ -2184,10 +2178,8 @@ impl<'a> FieldCodegen<'a> for Bitfield { #access_spec unsafe fn #raw_setter_name(this: *mut Self, val: #bitfield_ty) { unsafe { let val: #bitfield_int_ty = ::#prefix::mem::transmute(val); - <#unit_field_ty>::raw_set( + <#unit_field_ty>::raw_set_const::<#offset, #width>( ::#prefix::ptr::addr_of_mut!((*this).#unit_field_ident), - #offset, - #width, val as u64, ) }