@@ -485,11 +485,16 @@ static int npf_fsplit_abs(float f, uint64_t *out_int_part, uint64_t *out_frac_pa
     grisu2 (https://bit.ly/2JgMggX) and ryu (https://bit.ly/2RLXSg0)
     are fast + precise + round, but require large lookup tables. */
 
+#if 0
   uint32_t f_bits; { // union-cast is UB, let compiler optimize byte-copy loop.
     char const *src = (char const *)&f;
     char *dst = (char *)&f_bits;
     for (unsigned i = 0; i < sizeof(f_bits); ++i) { dst[i] = src[i]; }
   }
+#else
+  uint32_t f_bits;
+  __builtin_memcpy(&f_bits, &f, sizeof(uint32_t));
+#endif
 
   int const exponent =
     ((int)((f_bits >> NPF_MANTISSA_BITS) & ((1u << NPF_EXPONENT_BITS) - 1u)) -
@@ -549,12 +554,17 @@ static int npf_fsplit_abs(float f, uint64_t *out_int_part, uint64_t *out_frac_pa
 }
 
 static int npf_ftoa_rev(char *buf, float f, char case_adj, int *out_frac_chars) {
-  uint32_t f_bits; { // union-cast is UB, let compiler optimize byte-copy loop.
-    char const *src = (char const *)&f;
-    char *dst = (char *)&f_bits;
-    for (unsigned i = 0; i < sizeof(f_bits); ++i) { dst[i] = src[i]; }
-  }
-
+#if 0
+  uint32_t f_bits; { // union-cast is UB, let compiler optimize byte-copy loop.
+    char const *src = (char const *)&f;
+    char *dst = (char *)&f_bits;
+    for (unsigned i = 0; i < sizeof(f_bits); ++i) { dst[i] = src[i]; }
+  }
+#else
+  uint32_t f_bits;
+  __builtin_memcpy(&f_bits, &f, sizeof(uint32_t));
+#endif
+
   if ((uint8_t)(f_bits >> 23) == 0xFF) {
     if (f_bits & 0x7fffff) {
       for (int i = 0; i < 3; ++i) { *buf++ = (char)("NAN"[i] + case_adj); }
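As a side note, here is a minimal standalone sketch of the bit-reinterpretation idiom the new #else branches rely on: copying the float's object representation into a uint32_t with memcpy (or the GCC/Clang __builtin_memcpy used in the patch) is well-defined and typically optimizes down to a single register move. The float_bits helper below is illustrative only, not part of nanoprintf.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative helper (not from nanoprintf): reinterpret a float's object
   representation as a uint32_t without a union or pointer cast. */
static uint32_t float_bits(float f) {
  uint32_t bits;
  memcpy(&bits, &f, sizeof bits); /* well-defined; optimizers emit a plain move */
  return bits;
}

int main(void) {
  printf("0x%08X\n", (unsigned)float_bits(1.0f)); /* 1.0f -> 0x3F800000 */
  return 0;
}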