/* Conditional constant propagation pass for the GNU compiler.
   Copyright (C) 2000-2025 Free Software Foundation, Inc.
   Adapted from original RTL SSA-CCP by Daniel Berlin <dberlin@dberlin.org>
   Adapted to GIMPLE trees by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Conditional constant propagation (CCP) is based on the SSA
   propagation engine (tree-ssa-propagate.cc).  Constant assignments of
   the form VAR = CST are propagated from the assignments into uses of
   VAR, which in turn may generate new constants.  The simulation uses
   a four level lattice to keep track of constant values associated
   with SSA names.  Given an SSA name V_i, it may take one of the
   following values:

	UNINITIALIZED   ->  the initial state of the value.  This value
			    is replaced with a correct initial value
			    the first time the value is used, so the
			    rest of the pass does not need to care about
			    it.  Using this value simplifies initialization
			    of the pass, and prevents us from needlessly
			    scanning statements that are never reached.

	UNDEFINED	->  V_i is a local variable whose definition
			    has not been processed yet.  Therefore we
			    don't yet know if its value is a constant
			    or not.

	CONSTANT	->  V_i has been found to hold a constant
			    value C.

	VARYING		->  V_i cannot take a constant value, or if it
			    does, it is not possible to determine it
			    at compile time.

   The core of SSA-CCP is in ccp_visit_stmt and ccp_visit_phi_node:

   1- In ccp_visit_stmt, we are interested in assignments whose RHS
      evaluates into a constant and conditional jumps whose predicate
      evaluates into a boolean true or false.  When an assignment of
      the form V_i = CONST is found, V_i's lattice value is set to
      CONSTANT and CONST is associated with it.  This causes the
      propagation engine to add all the SSA edges coming out of the
      assignment into the worklists, so that statements that use V_i
      can be visited.

      If the statement is a conditional with a constant predicate, we
      mark the outgoing edges as executable or not executable
      depending on the predicate's value.  This is then used when
      visiting PHI nodes to know when a PHI argument can be ignored.


   2- In ccp_visit_phi_node, if all the PHI arguments evaluate to the
      same constant C, then the LHS of the PHI is set to C.  This
      evaluation is known as the "meet operation".  Since one of the
      goals of this evaluation is to optimistically return constant
      values as often as possible, it uses two main shortcuts:

      - If an argument is flowing in through a non-executable edge, it
	is ignored.  This is useful in cases like this:

			if (PRED)
			  a_9 = 3;
			else
			  a_10 = 100;
			a_11 = PHI (a_9, a_10)

	If PRED is known to always evaluate to false, then we can
	assume that a_11 will always take its value from a_10, meaning
	that instead of considering it VARYING (a_9 and a_10 have
	different values), we can consider it CONSTANT 100.

      - If an argument has an UNDEFINED value, then it does not affect
	the outcome of the meet operation.  If a variable V_i has an
	UNDEFINED value, it means that either its defining statement
	hasn't been visited yet or V_i has no defining statement, in
	which case the original symbol 'V' is being used
	uninitialized.  Since 'V' is a local variable, the compiler
	may assume any initial value for it.


   After propagation, every variable V_i that ends up with a lattice
   value of CONSTANT will have the associated constant value in the
   array CONST_VAL[i].VALUE.  That is fed into substitute_and_fold for
   final substitution and folding.

   This algorithm uses wide-ints at the max precision of the target.
   This means that, with one uninteresting exception, variables with
   UNSIGNED types never go to VARYING because the bits above the
   precision of the type of the variable are always zero.  The
   uninteresting case is a variable of UNSIGNED type that has the
   maximum precision of the target.  Such variables can go to VARYING,
   but this causes no loss of information since these variables will
   never be extended.

   References:

     Constant propagation with conditional branches,
     Wegman and Zadeck, ACM TOPLAS 13(2):181-210.

     Building an Optimizing Compiler,
     Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9.

     Advanced Compiler Design and Implementation,
     Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "tree.h"
#include "gimple.h"
#include "tree-pass.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "gimple-iterator.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "tree-cfg.h"
#include "tree-ssa-propagate.h"
#include "dbgcnt.h"
#include "builtins.h"
#include "cfgloop.h"
#include "stor-layout.h"
#include "optabs-query.h"
#include "tree-ssa-ccp.h"
#include "tree-dfa.h"
#include "diagnostic-core.h"
#include "stringpool.h"
#include "attribs.h"
#include "tree-vector-builder.h"
#include "cgraph.h"
#include "alloc-pool.h"
#include "symbol-summary.h"
#include "ipa-utils.h"
#include "sreal.h"
#include "ipa-cp.h"
#include "ipa-prop.h"
#include "internal-fn.h"
#include "gimple-range.h"

/* Possible lattice values.  */
typedef enum
{
  UNINITIALIZED,
  UNDEFINED,
  CONSTANT,
  VARYING
} ccp_lattice_t;

class ccp_prop_value_t {
public:
  /* Lattice value.  */
  ccp_lattice_t lattice_val;

  /* Propagated value.  */
  tree value;

  /* Mask that applies to the propagated value during CCP.  For X
     with a CONSTANT lattice value X & ~mask == value & ~mask.  The
     zero bits in the mask cover constant values.  The ones mean no
     information.  */
  widest_int mask;
};
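
/* As a concrete illustration of the value/mask encoding: an SSA name
   known to be a multiple of 4, with nothing known about its upper
   bits, is represented as value == 0 and mask == ~3.  The two zero
   bits at the bottom of the mask say that the corresponding bits of
   VALUE (both 0) are known; every one bit in the mask marks an
   unknown bit.  A fully known constant C has value == C and mask == 0,
   while a completely unknown quantity has mask == -1.  */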

class ccp_propagate : public ssa_propagation_engine
{
 public:
  enum ssa_prop_result visit_stmt (gimple *, edge *, tree *) final override;
  enum ssa_prop_result visit_phi (gphi *) final override;
};

/* Array of propagated constant values.  After propagation,
   CONST_VAL[I].VALUE holds the constant value for SSA_NAME(I).  If
   the constant is held in an SSA name representing a memory store
   (i.e., a VDEF), CONST_VAL[I].MEM_REF will contain the actual
   memory reference used to store (i.e., the LHS of the assignment
   doing the store).  */
static ccp_prop_value_t *const_val;
static unsigned n_const_val;

static void canonicalize_value (ccp_prop_value_t *);
static void ccp_lattice_meet (ccp_prop_value_t *, ccp_prop_value_t *);

/* Dump constant propagation value VAL to file OUTF prefixed by PREFIX.  */

static void
dump_lattice_value (FILE *outf, const char *prefix, ccp_prop_value_t val)
{
  switch (val.lattice_val)
    {
    case UNINITIALIZED:
      fprintf (outf, "%sUNINITIALIZED", prefix);
      break;
    case UNDEFINED:
      fprintf (outf, "%sUNDEFINED", prefix);
      break;
    case VARYING:
      fprintf (outf, "%sVARYING", prefix);
      break;
    case CONSTANT:
      if (TREE_CODE (val.value) != INTEGER_CST
	  || val.mask == 0)
	{
	  fprintf (outf, "%sCONSTANT ", prefix);
	  print_generic_expr (outf, val.value, dump_flags);
	}
      else
	{
	  widest_int cval = wi::bit_and_not (wi::to_widest (val.value),
					     val.mask);
	  fprintf (outf, "%sCONSTANT ", prefix);
	  print_hex (cval, outf);
	  fprintf (outf, " (");
	  print_hex (val.mask, outf);
	  fprintf (outf, ")");
	}
      break;
    default:
      gcc_unreachable ();
    }
}


/* Print lattice value VAL to stderr.  */

void debug_lattice_value (ccp_prop_value_t val);

DEBUG_FUNCTION void
debug_lattice_value (ccp_prop_value_t val)
{
  dump_lattice_value (stderr, "", val);
  fprintf (stderr, "\n");
}

/* Extend NONZERO_BITS to a full mask, based on sgn.  */

static widest_int
extend_mask (const wide_int &nonzero_bits, signop sgn)
{
  return widest_int::from (nonzero_bits, sgn);
}
260
261/* Compute a default value for variable VAR and store it in the
262 CONST_VAL array. The following rules are used to get default
263 values:
264
265 1- Global and static variables that are declared constant are
266 considered CONSTANT.
267
268 2- Any other value is considered UNDEFINED. This is useful when
269 considering PHI nodes. PHI arguments that are undefined do not
270 change the constant value of the PHI node, which allows for more
271 constants to be propagated.
272
273 3- Variables defined by statements other than assignments and PHI
274 nodes are considered VARYING.
275
276 4- Initial values of variables that are not GIMPLE registers are
277 considered VARYING. */
278
279static ccp_prop_value_t
280get_default_value (tree var)
281{
282 ccp_prop_value_t val = { .lattice_val: UNINITIALIZED, NULL_TREE, .mask: 0 };
283 gimple *stmt;
284
285 stmt = SSA_NAME_DEF_STMT (var);
286
287 if (gimple_nop_p (g: stmt))
288 {
289 /* Variables defined by an empty statement are those used
290 before being initialized. If VAR is a local variable, we
291 can assume initially that it is UNDEFINED, otherwise we must
292 consider it VARYING. */
293 if (!virtual_operand_p (op: var)
294 && SSA_NAME_VAR (var)
295 && VAR_P (SSA_NAME_VAR (var)))
296 val.lattice_val = UNDEFINED;
297 else
298 {
299 val.lattice_val = VARYING;
300 val.mask = -1;
301 if (flag_tree_bit_ccp && !VECTOR_TYPE_P (TREE_TYPE (var)))
302 {
303 wide_int nonzero_bits = get_nonzero_bits (var);
304 tree value;
305 widest_int mask;
306
307 if (SSA_NAME_VAR (var)
308 && TREE_CODE (SSA_NAME_VAR (var)) == PARM_DECL
309 && ipcp_get_parm_bits (SSA_NAME_VAR (var), &value, &mask))
310 {
311 val.lattice_val = CONSTANT;
312 val.value = value;
313 widest_int ipa_value = wi::to_widest (t: value);
314 /* Unknown bits from IPA CP must be equal to zero. */
315 gcc_assert (wi::bit_and (ipa_value, mask) == 0);
316 val.mask = mask;
317 if (nonzero_bits != -1)
318 val.mask &= extend_mask (nonzero_bits,
319 TYPE_SIGN (TREE_TYPE (var)));
320 }
321 else if (nonzero_bits != -1)
322 {
323 val.lattice_val = CONSTANT;
324 val.value = build_zero_cst (TREE_TYPE (var));
325 val.mask = extend_mask (nonzero_bits,
326 TYPE_SIGN (TREE_TYPE (var)));
327 }
328 }
329 }
330 }
331 else if (is_gimple_assign (gs: stmt))
332 {
333 tree cst;
334 if (gimple_assign_single_p (gs: stmt)
335 && DECL_P (gimple_assign_rhs1 (stmt))
336 && (cst = get_symbol_constant_value (gimple_assign_rhs1 (gs: stmt))))
337 {
338 val.lattice_val = CONSTANT;
339 val.value = cst;
340 }
341 else
342 {
343 /* Any other variable defined by an assignment is considered
344 UNDEFINED. */
345 val.lattice_val = UNDEFINED;
346 }
347 }
348 else if ((is_gimple_call (gs: stmt)
349 && gimple_call_lhs (gs: stmt) != NULL_TREE)
350 || gimple_code (g: stmt) == GIMPLE_PHI)
351 {
352 /* A variable defined by a call or a PHI node is considered
353 UNDEFINED. */
354 val.lattice_val = UNDEFINED;
355 }
356 else
357 {
358 /* Otherwise, VAR will never take on a constant value. */
359 val.lattice_val = VARYING;
360 val.mask = -1;
361 }
362
363 return val;
364}
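
/* An illustrative sketch of the rules above (the exact GIMPLE depends
   on earlier passes):

     static const int limit = 42;
     int
     foo (int n)
     {
       int i;           (never initialized)
       int x = limit;
       ...
     }

   The default definition of i's SSA name starts out UNDEFINED, an SSA
   name defined by the load "x_N = limit" starts out CONSTANT 42 via
   get_symbol_constant_value, and the default definition of the
   parameter n starts out VARYING unless IPA-CP has recorded known
   bits for it.  */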
365
366
367/* Get the constant value associated with variable VAR. */
368
369static inline ccp_prop_value_t *
370get_value (tree var)
371{
372 ccp_prop_value_t *val;
373
374 if (const_val == NULL
375 || SSA_NAME_VERSION (var) >= n_const_val)
376 return NULL;
377
378 val = &const_val[SSA_NAME_VERSION (var)];
379 if (val->lattice_val == UNINITIALIZED)
380 *val = get_default_value (var);
381
382 canonicalize_value (val);
383
384 return val;
385}
386
387/* Return the constant tree value associated with VAR. */
388
389static inline tree
390get_constant_value (tree var)
391{
392 ccp_prop_value_t *val;
393 if (TREE_CODE (var) != SSA_NAME)
394 {
395 if (is_gimple_min_invariant (var))
396 return var;
397 return NULL_TREE;
398 }
399 val = get_value (var);
400 if (val
401 && val->lattice_val == CONSTANT
402 && (TREE_CODE (val->value) != INTEGER_CST
403 || val->mask == 0))
404 return val->value;
405 return NULL_TREE;
406}
407
408/* Sets the value associated with VAR to VARYING. */
409
410static inline void
411set_value_varying (tree var)
412{
413 ccp_prop_value_t *val = &const_val[SSA_NAME_VERSION (var)];
414
415 val->lattice_val = VARYING;
416 val->value = NULL_TREE;
417 val->mask = -1;
418}
419
420/* For integer constants, make sure to drop TREE_OVERFLOW. */
421
422static void
423canonicalize_value (ccp_prop_value_t *val)
424{
425 if (val->lattice_val != CONSTANT)
426 return;
427
428 if (TREE_OVERFLOW_P (val->value))
429 val->value = drop_tree_overflow (val->value);
430}
431
432/* Return whether the lattice transition is valid. */
433
434static bool
435valid_lattice_transition (ccp_prop_value_t old_val, ccp_prop_value_t new_val)
436{
437 /* Lattice transitions must always be monotonically increasing in
438 value. */
439 if (old_val.lattice_val < new_val.lattice_val)
440 return true;
441
442 if (old_val.lattice_val != new_val.lattice_val)
443 return false;
444
445 if (!old_val.value && !new_val.value)
446 return true;
447
448 /* Now both lattice values are CONSTANT. */
449
450 /* Allow arbitrary copy changes as we might look through PHI <a_1, ...>
451 when only a single copy edge is executable. */
452 if (TREE_CODE (old_val.value) == SSA_NAME
453 && TREE_CODE (new_val.value) == SSA_NAME)
454 return true;
455
456 /* Allow transitioning from a constant to a copy. */
457 if (is_gimple_min_invariant (old_val.value)
458 && TREE_CODE (new_val.value) == SSA_NAME)
459 return true;
460
461 /* Allow transitioning from PHI <&x, not executable> == &x
462 to PHI <&x, &y> == common alignment. */
463 if (TREE_CODE (old_val.value) != INTEGER_CST
464 && TREE_CODE (new_val.value) == INTEGER_CST)
465 return true;
466
467 /* Bit-lattices have to agree in the still valid bits. */
468 if (TREE_CODE (old_val.value) == INTEGER_CST
469 && TREE_CODE (new_val.value) == INTEGER_CST)
470 return (wi::bit_and_not (x: wi::to_widest (t: old_val.value), y: new_val.mask)
471 == wi::bit_and_not (x: wi::to_widest (t: new_val.value), y: new_val.mask));
472
473 /* Otherwise constant values have to agree. */
474 if (operand_equal_p (old_val.value, new_val.value, flags: 0))
475 return true;
476
477 /* At least the kinds and types should agree now. */
478 if (TREE_CODE (old_val.value) != TREE_CODE (new_val.value)
479 || !types_compatible_p (TREE_TYPE (old_val.value),
480 TREE_TYPE (new_val.value)))
481 return false;
482
483 /* For floats and !HONOR_NANS allow transitions from (partial) NaN
484 to non-NaN. */
485 tree type = TREE_TYPE (new_val.value);
486 if (SCALAR_FLOAT_TYPE_P (type)
487 && !HONOR_NANS (type))
488 {
489 if (REAL_VALUE_ISNAN (TREE_REAL_CST (old_val.value)))
490 return true;
491 }
492 else if (VECTOR_FLOAT_TYPE_P (type)
493 && !HONOR_NANS (type))
494 {
495 unsigned int count
496 = tree_vector_builder::binary_encoded_nelts (vec1: old_val.value,
497 vec2: new_val.value);
498 for (unsigned int i = 0; i < count; ++i)
499 if (!REAL_VALUE_ISNAN
500 (TREE_REAL_CST (VECTOR_CST_ENCODED_ELT (old_val.value, i)))
501 && !operand_equal_p (VECTOR_CST_ENCODED_ELT (old_val.value, i),
502 VECTOR_CST_ENCODED_ELT (new_val.value, i), flags: 0))
503 return false;
504 return true;
505 }
506 else if (COMPLEX_FLOAT_TYPE_P (type)
507 && !HONOR_NANS (type))
508 {
509 if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_REALPART (old_val.value)))
510 && !operand_equal_p (TREE_REALPART (old_val.value),
511 TREE_REALPART (new_val.value), flags: 0))
512 return false;
513 if (!REAL_VALUE_ISNAN (TREE_REAL_CST (TREE_IMAGPART (old_val.value)))
514 && !operand_equal_p (TREE_IMAGPART (old_val.value),
515 TREE_IMAGPART (new_val.value), flags: 0))
516 return false;
517 return true;
518 }
519 return false;
520}
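
/* To illustrate the checks above: UNDEFINED -> CONSTANT 3 and
   CONSTANT 3 -> VARYING are monotonically increasing and therefore
   valid, whereas CONSTANT 3 -> CONSTANT 4 is rejected because two
   INTEGER_CSTs must agree in the bits still marked as known by the
   new mask.  CONSTANT 3 -> CONSTANT a_1 (a copy) is accepted, which
   is what happens when a PHI node temporarily has only a single
   executable copy edge.  */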
521
522/* Set the value for variable VAR to NEW_VAL. Return true if the new
523 value is different from VAR's previous value. */
524
525static bool
526set_lattice_value (tree var, ccp_prop_value_t *new_val)
527{
528 /* We can deal with old UNINITIALIZED values just fine here. */
529 ccp_prop_value_t *old_val = &const_val[SSA_NAME_VERSION (var)];
530
531 canonicalize_value (val: new_val);
532
  /* We have to be careful to not go up the bitwise lattice
     represented by the mask.  Instead of dropping to VARYING
     use the meet operator to retain a conservative value.
     Missed optimizations like PR65851 make this necessary.
     It also ensures we converge to a stable lattice solution.  */
538 if (old_val->lattice_val != UNINITIALIZED
539 /* But avoid using meet for constant -> copy transitions. */
540 && !(old_val->lattice_val == CONSTANT
541 && CONSTANT_CLASS_P (old_val->value)
542 && new_val->lattice_val == CONSTANT
543 && TREE_CODE (new_val->value) == SSA_NAME))
544 ccp_lattice_meet (new_val, old_val);
545
546 gcc_checking_assert (valid_lattice_transition (*old_val, *new_val));
547
548 /* If *OLD_VAL and NEW_VAL are the same, return false to inform the
549 caller that this was a non-transition. */
550 if (old_val->lattice_val != new_val->lattice_val
551 || (new_val->lattice_val == CONSTANT
552 && (TREE_CODE (new_val->value) != TREE_CODE (old_val->value)
553 || (TREE_CODE (new_val->value) == INTEGER_CST
554 && (new_val->mask != old_val->mask
555 || (wi::bit_and_not (x: wi::to_widest (t: old_val->value),
556 y: new_val->mask)
557 != wi::bit_and_not (x: wi::to_widest (t: new_val->value),
558 y: new_val->mask))))
559 || (TREE_CODE (new_val->value) != INTEGER_CST
560 && !operand_equal_p (new_val->value, old_val->value, flags: 0)))))
561 {
562 /* ??? We would like to delay creation of INTEGER_CSTs from
563 partially constants here. */
564
565 if (dump_file && (dump_flags & TDF_DETAILS))
566 {
567 dump_lattice_value (outf: dump_file, prefix: "Lattice value changed to ", val: *new_val);
568 fprintf (stream: dump_file, format: ". Adding SSA edges to worklist.\n");
569 }
570
571 *old_val = *new_val;
572
573 gcc_assert (new_val->lattice_val != UNINITIALIZED);
574 return true;
575 }
576
577 return false;
578}
579
580static ccp_prop_value_t get_value_for_expr (tree, bool);
581static ccp_prop_value_t bit_value_binop (enum tree_code, tree, tree, tree);
582void bit_value_binop (enum tree_code, signop, int, widest_int *, widest_int *,
583 signop, int, const widest_int &, const widest_int &,
584 signop, int, const widest_int &, const widest_int &);
585
586/* Return a widest_int that can be used for bitwise simplifications
587 from VAL. */
588
589static widest_int
590value_to_wide_int (ccp_prop_value_t val)
591{
592 if (val.value
593 && TREE_CODE (val.value) == INTEGER_CST)
594 return wi::to_widest (t: val.value);
595
596 return 0;
597}
598
599/* Return the value for the address expression EXPR based on alignment
600 information. */
601
602static ccp_prop_value_t
603get_value_from_alignment (tree expr)
604{
605 tree type = TREE_TYPE (expr);
606 ccp_prop_value_t val;
607 unsigned HOST_WIDE_INT bitpos;
608 unsigned int align;
609
610 gcc_assert (TREE_CODE (expr) == ADDR_EXPR);
611
612 get_pointer_alignment_1 (expr, &align, &bitpos);
613 val.mask = wi::bit_and_not
614 (POINTER_TYPE_P (type) || TYPE_UNSIGNED (type)
615 ? wi::mask <widest_int> (TYPE_PRECISION (type), negate_p: false)
616 : -1,
617 y: align / BITS_PER_UNIT - 1);
618 val.lattice_val
619 = wi::sext (x: val.mask, TYPE_PRECISION (type)) == -1 ? VARYING : CONSTANT;
620 if (val.lattice_val == CONSTANT)
621 val.value = build_int_cstu (type, bitpos / BITS_PER_UNIT);
622 else
623 val.value = NULL_TREE;
624
625 return val;
626}
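
/* For example, for an address like &s.f, where s is known to be
   8-byte aligned and field f sits at byte offset 4, the call above
   yields align == 64 and bitpos == 32 (both in bits).  The resulting
   lattice value is CONSTANT with value == 4 and mask == ~7 within the
   pointer precision: the low three bits of the address are known to
   be 100 in binary and all higher bits are unknown.  */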
627
628/* Return the value for the tree operand EXPR. If FOR_BITS_P is true
629 return constant bits extracted from alignment information for
630 invariant addresses. */
631
632static ccp_prop_value_t
633get_value_for_expr (tree expr, bool for_bits_p)
634{
635 ccp_prop_value_t val;
636
637 if (TREE_CODE (expr) == SSA_NAME)
638 {
639 ccp_prop_value_t *val_ = get_value (var: expr);
640 if (val_)
641 val = *val_;
642 else
643 {
644 val.lattice_val = VARYING;
645 val.value = NULL_TREE;
646 val.mask = -1;
647 }
648 if (for_bits_p
649 && val.lattice_val == CONSTANT)
650 {
651 if (TREE_CODE (val.value) == ADDR_EXPR)
652 val = get_value_from_alignment (expr: val.value);
653 else if (TREE_CODE (val.value) != INTEGER_CST)
654 {
655 val.lattice_val = VARYING;
656 val.value = NULL_TREE;
657 val.mask = -1;
658 }
659 }
660 /* Fall back to a copy value. */
661 if (!for_bits_p
662 && val.lattice_val == VARYING
663 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (expr))
664 {
665 val.lattice_val = CONSTANT;
666 val.value = expr;
667 val.mask = -1;
668 }
669 }
670 else if (is_gimple_min_invariant (expr)
671 && (!for_bits_p || TREE_CODE (expr) == INTEGER_CST))
672 {
673 val.lattice_val = CONSTANT;
674 val.value = expr;
675 val.mask = 0;
676 canonicalize_value (val: &val);
677 }
678 else if (TREE_CODE (expr) == ADDR_EXPR)
679 val = get_value_from_alignment (expr);
680 else
681 {
682 val.lattice_val = VARYING;
683 val.mask = -1;
684 val.value = NULL_TREE;
685 }
686
687 if (val.lattice_val == VARYING
688 && INTEGRAL_TYPE_P (TREE_TYPE (expr))
689 && TYPE_UNSIGNED (TREE_TYPE (expr)))
690 val.mask = wi::zext (x: val.mask, TYPE_PRECISION (TREE_TYPE (expr)));
691
692 return val;
693}
694
695/* Return the likely CCP lattice value for STMT.
696
697 If STMT has no operands, then return CONSTANT.
698
699 Else if undefinedness of operands of STMT cause its value to be
700 undefined, then return UNDEFINED.
701
702 Else if any operands of STMT are constants, then return CONSTANT.
703
704 Else return VARYING. */
705
706static ccp_lattice_t
707likely_value (gimple *stmt)
708{
709 bool has_constant_operand, has_undefined_operand, all_undefined_operands;
710 bool has_nsa_operand;
711 tree use;
712 ssa_op_iter iter;
713 unsigned i;
714
715 enum gimple_code code = gimple_code (g: stmt);
716
717 /* This function appears to be called only for assignments, calls,
718 conditionals, and switches, due to the logic in visit_stmt. */
719 gcc_assert (code == GIMPLE_ASSIGN
720 || code == GIMPLE_CALL
721 || code == GIMPLE_COND
722 || code == GIMPLE_SWITCH);
723
724 /* If the statement has volatile operands, it won't fold to a
725 constant value. */
726 if (gimple_has_volatile_ops (stmt))
727 return VARYING;
728
729 /* .DEFERRED_INIT produces undefined. */
730 if (gimple_call_internal_p (gs: stmt, fn: IFN_DEFERRED_INIT))
731 return UNDEFINED;
732
733 /* Arrive here for more complex cases. */
734 has_constant_operand = false;
735 has_undefined_operand = false;
736 all_undefined_operands = true;
737 has_nsa_operand = false;
738 FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE)
739 {
740 ccp_prop_value_t *val = get_value (var: use);
741
742 if (val && val->lattice_val == UNDEFINED)
743 has_undefined_operand = true;
744 else
745 all_undefined_operands = false;
746
747 if (val && val->lattice_val == CONSTANT)
748 has_constant_operand = true;
749
750 if (SSA_NAME_IS_DEFAULT_DEF (use)
751 || !prop_simulate_again_p (SSA_NAME_DEF_STMT (use)))
752 has_nsa_operand = true;
753 }
754
755 /* There may be constants in regular rhs operands. For calls we
756 have to ignore lhs, fndecl and static chain, otherwise only
757 the lhs. */
758 for (i = (is_gimple_call (gs: stmt) ? 2 : 0) + gimple_has_lhs (stmt);
759 i < gimple_num_ops (gs: stmt); ++i)
760 {
761 tree op = gimple_op (gs: stmt, i);
762 if (!op || TREE_CODE (op) == SSA_NAME)
763 continue;
764 if (is_gimple_min_invariant (op))
765 has_constant_operand = true;
766 else if (TREE_CODE (op) == CONSTRUCTOR)
767 {
768 unsigned j;
769 tree val;
770 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (op), j, val)
771 if (CONSTANT_CLASS_P (val))
772 {
773 has_constant_operand = true;
774 break;
775 }
776 }
777 }
778
779 if (has_constant_operand)
780 all_undefined_operands = false;
781
782 if (has_undefined_operand
783 && code == GIMPLE_CALL
784 && gimple_call_internal_p (gs: stmt))
785 switch (gimple_call_internal_fn (gs: stmt))
786 {
      /* These three builtins use the first argument just as a magic
	 way to find the decl UID.  */
789 case IFN_GOMP_SIMD_LANE:
790 case IFN_GOMP_SIMD_VF:
791 case IFN_GOMP_SIMD_LAST_LANE:
792 has_undefined_operand = false;
793 break;
794 default:
795 break;
796 }
797
798 /* If the operation combines operands like COMPLEX_EXPR make sure to
799 not mark the result UNDEFINED if only one part of the result is
800 undefined. */
801 if (has_undefined_operand && all_undefined_operands)
802 return UNDEFINED;
803 else if (code == GIMPLE_ASSIGN && has_undefined_operand)
804 {
805 switch (gimple_assign_rhs_code (gs: stmt))
806 {
807 /* Unary operators are handled with all_undefined_operands. */
808 case PLUS_EXPR:
809 case MINUS_EXPR:
810 case POINTER_PLUS_EXPR:
811 case BIT_XOR_EXPR:
812 /* Not MIN_EXPR, MAX_EXPR. One VARYING operand may be selected.
813 Not bitwise operators, one VARYING operand may specify the
814 result completely.
815 Not logical operators for the same reason, apart from XOR.
816 Not COMPLEX_EXPR as one VARYING operand makes the result partly
817 not UNDEFINED. Not *DIV_EXPR, comparisons and shifts because
818 the undefined operand may be promoted. */
819 return UNDEFINED;
820
821 case ADDR_EXPR:
822 /* If any part of an address is UNDEFINED, like the index
823 of an ARRAY_EXPR, then treat the result as UNDEFINED. */
824 return UNDEFINED;
825
826 default:
827 ;
828 }
829 }
830 /* If there was an UNDEFINED operand but the result may be not UNDEFINED
831 fall back to CONSTANT. During iteration UNDEFINED may still drop
832 to CONSTANT. */
833 if (has_undefined_operand)
834 return CONSTANT;
835
836 /* We do not consider virtual operands here -- load from read-only
837 memory may have only VARYING virtual operands, but still be
838 constant. Also we can combine the stmt with definitions from
839 operands whose definitions are not simulated again. */
840 if (has_constant_operand
841 || has_nsa_operand
842 || gimple_references_memory_p (stmt))
843 return CONSTANT;
844
845 return VARYING;
846}
847
848/* Returns true if STMT cannot be constant. */
849
850static bool
851surely_varying_stmt_p (gimple *stmt)
852{
853 /* If the statement has operands that we cannot handle, it cannot be
854 constant. */
855 if (gimple_has_volatile_ops (stmt))
856 return true;
857
  /* If it is a call that does not return a value, or a direct call
     to a function that is not a built-in and does not have the
     assume_aligned or alloc_align attribute, it is varying.  */
861 if (is_gimple_call (gs: stmt))
862 {
863 tree fndecl, fntype = gimple_call_fntype (gs: stmt);
864 if (!gimple_call_lhs (gs: stmt)
865 || ((fndecl = gimple_call_fndecl (gs: stmt)) != NULL_TREE
866 && !fndecl_built_in_p (node: fndecl)
867 && !lookup_attribute (attr_name: "assume_aligned",
868 TYPE_ATTRIBUTES (fntype))
869 && !lookup_attribute (attr_name: "alloc_align",
870 TYPE_ATTRIBUTES (fntype))))
871 return true;
872 }
873
874 /* Any other store operation is not interesting. */
875 else if (gimple_vdef (g: stmt))
876 return true;
877
878 /* Anything other than assignments and conditional jumps are not
879 interesting for CCP. */
880 if (gimple_code (g: stmt) != GIMPLE_ASSIGN
881 && gimple_code (g: stmt) != GIMPLE_COND
882 && gimple_code (g: stmt) != GIMPLE_SWITCH
883 && gimple_code (g: stmt) != GIMPLE_CALL)
884 return true;
885
886 return false;
887}
888
889/* Initialize local data structures for CCP. */
890
891static void
892ccp_initialize (void)
893{
894 basic_block bb;
895
896 n_const_val = num_ssa_names;
897 const_val = XCNEWVEC (ccp_prop_value_t, n_const_val);
898
899 /* Initialize simulation flags for PHI nodes and statements. */
900 FOR_EACH_BB_FN (bb, cfun)
901 {
902 gimple_stmt_iterator i;
903
904 for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (i: &i))
905 {
906 gimple *stmt = gsi_stmt (i);
907 bool is_varying;
908
	  /* If the statement is a control statement, we always want to
	     simulate it at least once; otherwise the outgoing edges
	     would never get added.  */
912 if (stmt_ends_bb_p (stmt))
913 is_varying = false;
914 else
915 is_varying = surely_varying_stmt_p (stmt);
916
917 if (is_varying)
918 {
919 tree def;
920 ssa_op_iter iter;
921
922 /* If the statement will not produce a constant, mark
923 all its outputs VARYING. */
924 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
925 set_value_varying (def);
926 }
927 prop_set_simulate_again (s: stmt, visit_p: !is_varying);
928 }
929 }
930
931 /* Now process PHI nodes. We never clear the simulate_again flag on
932 phi nodes, since we do not know which edges are executable yet,
933 except for phi nodes for virtual operands when we do not do store ccp. */
934 FOR_EACH_BB_FN (bb, cfun)
935 {
936 gphi_iterator i;
937
938 for (i = gsi_start_phis (bb); !gsi_end_p (i); gsi_next (i: &i))
939 {
940 gphi *phi = i.phi ();
941
942 if (virtual_operand_p (op: gimple_phi_result (gs: phi)))
943 prop_set_simulate_again (s: phi, visit_p: false);
944 else
945 prop_set_simulate_again (s: phi, visit_p: true);
946 }
947 }
948}
949
/* Debug counter support.  Reset the values of SSA names to VARYING
   once the number of SSA names analyzed exceeds the specified debug
   count.  */
953
954static void
955do_dbg_cnt (void)
956{
957 unsigned i;
958 for (i = 0; i < num_ssa_names; i++)
959 {
960 if (!dbg_cnt (index: ccp))
961 {
962 const_val[i].lattice_val = VARYING;
963 const_val[i].mask = -1;
964 const_val[i].value = NULL_TREE;
965 }
966 }
967}
968
969
970/* We want to provide our own GET_VALUE and FOLD_STMT virtual methods. */
971class ccp_folder : public substitute_and_fold_engine
972{
973 public:
974 tree value_of_expr (tree, gimple *) final override;
975 bool fold_stmt (gimple_stmt_iterator *) final override;
976};
977
978/* This method just wraps GET_CONSTANT_VALUE for now. Over time
979 naked calls to GET_CONSTANT_VALUE should be eliminated in favor
980 of calling member functions. */
981
982tree
983ccp_folder::value_of_expr (tree op, gimple *)
984{
985 return get_constant_value (var: op);
986}
987
988/* Do final substitution of propagated values, cleanup the flowgraph and
989 free allocated storage. If NONZERO_P, record nonzero bits.
990
991 Return TRUE when something was optimized. */
992
993static bool
994ccp_finalize (bool nonzero_p)
995{
996 bool something_changed;
997 unsigned i;
998 tree name;
999
1000 do_dbg_cnt ();
1001
1002 /* Derive alignment and misalignment information from partially
1003 constant pointers in the lattice or nonzero bits from partially
1004 constant integers. */
1005 FOR_EACH_SSA_NAME (i, name, cfun)
1006 {
1007 ccp_prop_value_t *val;
1008 unsigned int tem, align;
1009
1010 if (!POINTER_TYPE_P (TREE_TYPE (name))
1011 && (!INTEGRAL_TYPE_P (TREE_TYPE (name))
1012 /* Don't record nonzero bits before IPA to avoid
1013 using too much memory. */
1014 || !nonzero_p))
1015 continue;
1016
1017 val = get_value (var: name);
1018 if (val->lattice_val != CONSTANT
1019 || TREE_CODE (val->value) != INTEGER_CST
1020 || val->mask == 0)
1021 continue;
1022
1023 if (POINTER_TYPE_P (TREE_TYPE (name)))
1024 {
1025 /* Trailing mask bits specify the alignment, trailing value
1026 bits the misalignment. */
1027 tem = val->mask.to_uhwi ();
1028 align = least_bit_hwi (x: tem);
1029 if (align > 1)
1030 set_ptr_info_alignment (get_ptr_info (name), align,
1031 (TREE_INT_CST_LOW (val->value)
1032 & (align - 1)));
1033 }
1034 else
1035 {
1036 unsigned int precision = TYPE_PRECISION (TREE_TYPE (val->value));
1037 wide_int value = wi::to_wide (t: val->value);
1038 wide_int mask = wide_int::from (x: val->mask, precision, sgn: UNSIGNED);
1039 value = value & ~mask;
1040 set_bitmask (name, value, mask);
1041 }
1042 }
1043
1044 /* Perform substitutions based on the known constant values. */
1045 class ccp_folder ccp_folder;
1046 something_changed = ccp_folder.substitute_and_fold ();
1047
1048 free (ptr: const_val);
1049 const_val = NULL;
1050 return something_changed;
1051}
1052
1053
1054/* Compute the meet operator between *VAL1 and *VAL2. Store the result
1055 in VAL1.
1056
1057 any M UNDEFINED = any
1058 any M VARYING = VARYING
1059 Ci M Cj = Ci if (i == j)
1060 Ci M Cj = VARYING if (i != j)
1061 */
1062
1063static void
1064ccp_lattice_meet (ccp_prop_value_t *val1, ccp_prop_value_t *val2)
1065{
  if (val1->lattice_val == UNDEFINED
      /* For UNDEFINED M SSA_NAME we can't always use the SSA name
	 because its definition may not dominate the PHI node.  Doing
	 optimistic copy propagation also causes a lot of
	 gcc.dg/uninit-pred*.c FAILs.  */
1070 && (val2->lattice_val != CONSTANT
1071 || TREE_CODE (val2->value) != SSA_NAME))
1072 {
1073 /* UNDEFINED M any = any */
1074 *val1 = *val2;
1075 }
1076 else if (val2->lattice_val == UNDEFINED
1077 /* See above. */
1078 && (val1->lattice_val != CONSTANT
1079 || TREE_CODE (val1->value) != SSA_NAME))
1080 {
1081 /* any M UNDEFINED = any
1082 Nothing to do. VAL1 already contains the value we want. */
1083 ;
1084 }
1085 else if (val1->lattice_val == VARYING
1086 || val2->lattice_val == VARYING)
1087 {
1088 /* any M VARYING = VARYING. */
1089 val1->lattice_val = VARYING;
1090 val1->mask = -1;
1091 val1->value = NULL_TREE;
1092 }
1093 else if (val1->lattice_val == CONSTANT
1094 && val2->lattice_val == CONSTANT
1095 && TREE_CODE (val1->value) == INTEGER_CST
1096 && TREE_CODE (val2->value) == INTEGER_CST)
1097 {
1098 /* Ci M Cj = Ci if (i == j)
1099 Ci M Cj = VARYING if (i != j)
1100
1101 For INTEGER_CSTs mask unequal bits. If no equal bits remain,
1102 drop to varying. */
1103 val1->mask = (val1->mask | val2->mask
1104 | (wi::to_widest (t: val1->value)
1105 ^ wi::to_widest (t: val2->value)));
1106 if (wi::sext (x: val1->mask, TYPE_PRECISION (TREE_TYPE (val1->value))) == -1)
1107 {
1108 val1->lattice_val = VARYING;
1109 val1->value = NULL_TREE;
1110 }
1111 }
1112 else if (val1->lattice_val == CONSTANT
1113 && val2->lattice_val == CONSTANT
1114 && operand_equal_p (val1->value, val2->value, flags: 0))
1115 {
1116 /* Ci M Cj = Ci if (i == j)
1117 Ci M Cj = VARYING if (i != j)
1118
1119 VAL1 already contains the value we want for equivalent values. */
1120 }
1121 else if (val1->lattice_val == CONSTANT
1122 && val2->lattice_val == CONSTANT
1123 && (TREE_CODE (val1->value) == ADDR_EXPR
1124 || TREE_CODE (val2->value) == ADDR_EXPR))
1125 {
1126 /* When not equal addresses are involved try meeting for
1127 alignment. */
1128 ccp_prop_value_t tem = *val2;
1129 if (TREE_CODE (val1->value) == ADDR_EXPR)
1130 *val1 = get_value_for_expr (expr: val1->value, for_bits_p: true);
1131 if (TREE_CODE (val2->value) == ADDR_EXPR)
1132 tem = get_value_for_expr (expr: val2->value, for_bits_p: true);
1133 ccp_lattice_meet (val1, val2: &tem);
1134 }
1135 else
1136 {
1137 /* Any other combination is VARYING. */
1138 val1->lattice_val = VARYING;
1139 val1->mask = -1;
1140 val1->value = NULL_TREE;
1141 }
1142}
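
/* Worked example of the INTEGER_CST meet above: meeting 12 (0b1100,
   mask 0) with 10 (0b1010, mask 0) gives

     mask  = 0 | 0 | (12 ^ 10) = 0b0110
     value = 12

   i.e. bit 3 is known to be 1 and bit 0 known to be 0, while bits 1
   and 2 are unknown.  Meeting that result with 2 (0b0010) would make
   bit 3 unknown as well; only if the mask sign-extends to all ones
   does the value drop to VARYING.  */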
1143
1144
/* Loop through the arguments of PHI and compare their lattice values
   to determine PHI's lattice value.  The value of a PHI node is
   determined by calling ccp_lattice_meet on all the arguments that
   are incoming via executable edges.  */
1149
1150enum ssa_prop_result
1151ccp_propagate::visit_phi (gphi *phi)
1152{
1153 unsigned i;
1154 ccp_prop_value_t new_val;
1155
1156 if (dump_file && (dump_flags & TDF_DETAILS))
1157 {
1158 fprintf (stream: dump_file, format: "\nVisiting PHI node: ");
1159 print_gimple_stmt (dump_file, phi, 0, dump_flags);
1160 }
1161
1162 new_val.lattice_val = UNDEFINED;
1163 new_val.value = NULL_TREE;
1164 new_val.mask = 0;
1165
1166 bool first = true;
1167 bool non_exec_edge = false;
1168 for (i = 0; i < gimple_phi_num_args (gs: phi); i++)
1169 {
1170 /* Compute the meet operator over all the PHI arguments flowing
1171 through executable edges. */
1172 edge e = gimple_phi_arg_edge (phi, i);
1173
1174 if (dump_file && (dump_flags & TDF_DETAILS))
1175 {
1176 fprintf (stream: dump_file,
1177 format: "\tArgument #%d (%d -> %d %sexecutable)\n",
1178 i, e->src->index, e->dest->index,
1179 (e->flags & EDGE_EXECUTABLE) ? "" : "not ");
1180 }
1181
      /* If the incoming edge is executable, compute the meet operator for
	 the existing value of the PHI node and the current PHI argument.  */
1184 if (e->flags & EDGE_EXECUTABLE)
1185 {
1186 tree arg = gimple_phi_arg (gs: phi, index: i)->def;
1187 ccp_prop_value_t arg_val = get_value_for_expr (expr: arg, for_bits_p: false);
1188
1189 if (first)
1190 {
1191 new_val = arg_val;
1192 first = false;
1193 }
1194 else
1195 ccp_lattice_meet (val1: &new_val, val2: &arg_val);
1196
1197 if (dump_file && (dump_flags & TDF_DETAILS))
1198 {
1199 fprintf (stream: dump_file, format: "\t");
1200 print_generic_expr (dump_file, arg, dump_flags);
1201 dump_lattice_value (outf: dump_file, prefix: "\tValue: ", val: arg_val);
1202 fprintf (stream: dump_file, format: "\n");
1203 }
1204
1205 if (new_val.lattice_val == VARYING)
1206 break;
1207 }
1208 else
1209 non_exec_edge = true;
1210 }
1211
  /* In case there were non-executable edges and the value is a copy,
     make sure its definition dominates the PHI node.  */
1214 if (non_exec_edge
1215 && new_val.lattice_val == CONSTANT
1216 && TREE_CODE (new_val.value) == SSA_NAME
1217 && ! SSA_NAME_IS_DEFAULT_DEF (new_val.value)
1218 && ! dominated_by_p (CDI_DOMINATORS, gimple_bb (g: phi),
1219 gimple_bb (SSA_NAME_DEF_STMT (new_val.value))))
1220 {
1221 new_val.lattice_val = VARYING;
1222 new_val.value = NULL_TREE;
1223 new_val.mask = -1;
1224 }
1225
1226 if (dump_file && (dump_flags & TDF_DETAILS))
1227 {
1228 dump_lattice_value (outf: dump_file, prefix: "\n PHI node value: ", val: new_val);
1229 fprintf (stream: dump_file, format: "\n\n");
1230 }
1231
1232 /* Make the transition to the new value. */
1233 if (set_lattice_value (var: gimple_phi_result (gs: phi), new_val: &new_val))
1234 {
1235 if (new_val.lattice_val == VARYING)
1236 return SSA_PROP_VARYING;
1237 else
1238 return SSA_PROP_INTERESTING;
1239 }
1240 else
1241 return SSA_PROP_NOT_INTERESTING;
1242}
1243
1244/* Return the constant value for OP or OP otherwise. */
1245
1246static tree
1247valueize_op (tree op)
1248{
1249 if (TREE_CODE (op) == SSA_NAME)
1250 {
1251 tree tem = get_constant_value (var: op);
1252 if (tem)
1253 return tem;
1254 }
1255 return op;
1256}
1257
1258/* Return the constant value for OP, but signal to not follow SSA
1259 edges if the definition may be simulated again. */
1260
1261static tree
1262valueize_op_1 (tree op)
1263{
1264 if (TREE_CODE (op) == SSA_NAME)
1265 {
1266 /* If the definition may be simulated again we cannot follow
1267 this SSA edge as the SSA propagator does not necessarily
1268 re-visit the use. */
1269 gimple *def_stmt = SSA_NAME_DEF_STMT (op);
1270 if (!gimple_nop_p (g: def_stmt)
1271 && prop_simulate_again_p (s: def_stmt))
1272 return NULL_TREE;
1273 tree tem = get_constant_value (var: op);
1274 if (tem)
1275 return tem;
1276 }
1277 return op;
1278}
1279
1280/* CCP specific front-end to the non-destructive constant folding
1281 routines.
1282
1283 Attempt to simplify the RHS of STMT knowing that one or more
1284 operands are constants.
1285
1286 If simplification is possible, return the simplified RHS,
1287 otherwise return the original RHS or NULL_TREE. */
1288
1289static tree
1290ccp_fold (gimple *stmt)
1291{
1292 switch (gimple_code (g: stmt))
1293 {
1294 case GIMPLE_SWITCH:
1295 {
1296 /* Return the constant switch index. */
1297 return valueize_op (op: gimple_switch_index (gs: as_a <gswitch *> (p: stmt)));
1298 }
1299
1300 case GIMPLE_COND:
1301 case GIMPLE_ASSIGN:
1302 case GIMPLE_CALL:
1303 return gimple_fold_stmt_to_constant_1 (stmt,
1304 valueize_op, valueize_op_1);
1305
1306 default:
1307 gcc_unreachable ();
1308 }
1309}
1310
1311/* Determine the minimum and maximum values, *MIN and *MAX respectively,
1312 represented by the mask pair VAL and MASK with signedness SGN and
1313 precision PRECISION. */
1314
1315static void
1316value_mask_to_min_max (widest_int *min, widest_int *max,
1317 const widest_int &val, const widest_int &mask,
1318 signop sgn, int precision)
1319{
1320 *min = wi::bit_and_not (x: val, y: mask);
1321 *max = val | mask;
1322 if (sgn == SIGNED && wi::neg_p (x: mask))
1323 {
1324 widest_int sign_bit = wi::lshift (x: 1, y: precision - 1);
1325 *min ^= sign_bit;
1326 *max ^= sign_bit;
1327 /* MAX is zero extended, and MIN is sign extended. */
1328 *min = wi::ext (x: *min, offset: precision, sgn);
1329 *max = wi::ext (x: *max, offset: precision, sgn);
1330 }
1331}
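
/* For example, with val == 0xa0 and mask == 0x0f (low nibble unknown)
   this gives *min == 0xa0 and *max == 0xaf.  When the sign bit itself
   is unknown under SIGNED, the XOR with the sign bit above biases the
   range before extension, so that e.g. val == 0 with a mask covering
   bits 7, 1 and 0 of an 8-bit value yields [-128, 3] rather than a
   crossed range.  */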
1332
1333/* Apply the operation CODE in type TYPE to the value, mask pair
1334 RVAL and RMASK representing a value of type RTYPE and set
1335 the value, mask pair *VAL and *MASK to the result. */
1336
1337void
1338bit_value_unop (enum tree_code code, signop type_sgn, int type_precision,
1339 widest_int *val, widest_int *mask,
1340 signop rtype_sgn, int rtype_precision,
1341 const widest_int &rval, const widest_int &rmask)
1342{
1343 switch (code)
1344 {
1345 case BIT_NOT_EXPR:
1346 *mask = rmask;
1347 *val = ~rval;
1348 break;
1349
1350 case NEGATE_EXPR:
1351 {
1352 widest_int temv, temm;
1353 /* Return ~rval + 1. */
1354 bit_value_unop (code: BIT_NOT_EXPR, type_sgn, type_precision, val: &temv, mask: &temm,
1355 rtype_sgn: type_sgn, rtype_precision: type_precision, rval, rmask);
1356 bit_value_binop (PLUS_EXPR, type_sgn, type_precision, val, mask,
1357 type_sgn, type_precision, temv, temm,
1358 type_sgn, type_precision, 1, 0);
1359 break;
1360 }
1361
1362 CASE_CONVERT:
1363 {
1364 /* First extend mask and value according to the original type. */
1365 *mask = wi::ext (x: rmask, offset: rtype_precision, sgn: rtype_sgn);
1366 *val = wi::ext (x: rval, offset: rtype_precision, sgn: rtype_sgn);
1367
1368 /* Then extend mask and value according to the target type. */
1369 *mask = wi::ext (x: *mask, offset: type_precision, sgn: type_sgn);
1370 *val = wi::ext (x: *val, offset: type_precision, sgn: type_sgn);
1371 break;
1372 }
1373
1374 case ABS_EXPR:
1375 case ABSU_EXPR:
1376 if (wi::sext (x: rmask, offset: rtype_precision) == -1)
1377 {
1378 *mask = -1;
1379 *val = 0;
1380 }
1381 else if (wi::neg_p (x: rmask))
1382 {
1383 /* Result is either rval or -rval. */
1384 widest_int temv, temm;
1385 bit_value_unop (code: NEGATE_EXPR, type_sgn: rtype_sgn, type_precision: rtype_precision, val: &temv,
1386 mask: &temm, rtype_sgn: type_sgn, rtype_precision: type_precision, rval, rmask);
1387 temm |= (rmask | (rval ^ temv));
1388 /* Extend the result. */
1389 *mask = wi::ext (x: temm, offset: type_precision, sgn: type_sgn);
1390 *val = wi::ext (x: temv, offset: type_precision, sgn: type_sgn);
1391 }
1392 else if (wi::neg_p (x: rval))
1393 {
1394 bit_value_unop (code: NEGATE_EXPR, type_sgn, type_precision, val, mask,
1395 rtype_sgn: type_sgn, rtype_precision: type_precision, rval, rmask);
1396 }
1397 else
1398 {
1399 *mask = rmask;
1400 *val = rval;
1401 }
1402 break;
1403
1404 default:
1405 *mask = -1;
1406 *val = 0;
1407 break;
1408 }
1409}
1410
1411/* Determine the mask pair *VAL and *MASK from multiplying the
1412 argument mask pair RVAL, RMASK by the unsigned constant C. */
1413static void
1414bit_value_mult_const (signop sgn, int width,
1415 widest_int *val, widest_int *mask,
1416 const widest_int &rval, const widest_int &rmask,
1417 widest_int c)
1418{
1419 widest_int sum_mask = 0;
1420
1421 /* Ensure rval_lo only contains known bits. */
1422 widest_int rval_lo = wi::bit_and_not (x: rval, y: rmask);
1423
1424 if (rval_lo != 0)
1425 {
1426 /* General case (some bits of multiplicand are known set). */
1427 widest_int sum_val = 0;
1428 while (c != 0)
1429 {
1430 /* Determine the lowest bit set in the multiplier. */
1431 int bitpos = wi::ctz (c);
1432 widest_int term_mask = rmask << bitpos;
1433 widest_int term_val = rval_lo << bitpos;
1434
1435 /* sum += term. */
1436 widest_int lo = sum_val + term_val;
1437 widest_int hi = (sum_val | sum_mask) + (term_val | term_mask);
1438 sum_mask |= term_mask | (lo ^ hi);
1439 sum_val = lo;
1440
1441 /* Clear this bit in the multiplier. */
1442 c ^= wi::lshift (x: 1, y: bitpos);
1443 }
1444 /* Correctly extend the result value. */
1445 *val = wi::ext (x: sum_val, offset: width, sgn);
1446 }
1447 else
1448 {
1449 /* Special case (no bits of multiplicand are known set). */
1450 while (c != 0)
1451 {
1452 /* Determine the lowest bit set in the multiplier. */
1453 int bitpos = wi::ctz (c);
1454 widest_int term_mask = rmask << bitpos;
1455
1456 /* sum += term. */
1457 widest_int hi = sum_mask + term_mask;
1458 sum_mask |= term_mask | hi;
1459
1460 /* Clear this bit in the multiplier. */
1461 c ^= wi::lshift (x: 1, y: bitpos);
1462 }
1463 *val = 0;
1464 }
1465
1466 /* Correctly extend the result mask. */
1467 *mask = wi::ext (x: sum_mask, offset: width, sgn);
1468}
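
/* A small worked example of the shift-and-add decomposition above:
   multiplying the pair (rval == 4, rmask == 1), i.e. a value known to
   be 4 or 5, by c == 3 processes the terms x << 0 and x << 1.  After
   the first term sum_val == 4 and sum_mask == 1; adding the second
   term (val 8, mask 2) gives lo == 12 and hi == 15, so sum_mask
   becomes 1 | 2 | (12 ^ 15) == 3 and the result is value 12 with
   mask 3, covering the two possible products 12 and 15.  */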
1469
1470/* Fill up to MAX values in the BITS array with values representing
1471 each of the non-zero bits in the value X. Returns the number of
1472 bits in X (capped at the maximum value MAX). For example, an X
1473 value 11, places 1, 2 and 8 in BITS and returns the value 3. */
1474
1475static unsigned int
1476get_individual_bits (widest_int *bits, widest_int x, unsigned int max)
1477{
1478 unsigned int count = 0;
1479 while (count < max && x != 0)
1480 {
1481 int bitpos = wi::ctz (x);
1482 bits[count] = wi::lshift (x: 1, y: bitpos);
1483 x ^= bits[count];
1484 count++;
1485 }
1486 return count;
1487}
1488
/* Array of 2^N - 1 values representing the bits flipped between
   consecutive Gray codes.  This is used to efficiently enumerate
   all 2^N values of N bits using XOR.  */
1492static const unsigned char gray_code_bit_flips[63] = {
1493 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4,
1494 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5,
1495 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4,
1496 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0
1497};
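
/* For example, with two unknown shift bits b0 and b1, the users below
   start from the shift with both bits clear and then XOR in bits[0],
   bits[1], bits[0] (the first three Gray code flips), visiting the
   remaining combinations b0, b0|b1 and b1 exactly once each without
   recomputing the base shift.  */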
1498
1499/* Apply the operation CODE in type TYPE to the value, mask pairs
1500 R1VAL, R1MASK and R2VAL, R2MASK representing a values of type R1TYPE
1501 and R2TYPE and set the value, mask pair *VAL and *MASK to the result. */
1502
1503void
1504bit_value_binop (enum tree_code code, signop sgn, int width,
1505 widest_int *val, widest_int *mask,
1506 signop r1type_sgn, int r1type_precision,
1507 const widest_int &r1val, const widest_int &r1mask,
1508 signop r2type_sgn, int r2type_precision ATTRIBUTE_UNUSED,
1509 const widest_int &r2val, const widest_int &r2mask)
1510{
1511 bool swap_p = false;
1512
1513 /* Assume we'll get a constant result. Use an initial non varying
1514 value, we fall back to varying in the end if necessary. */
1515 *mask = -1;
1516 /* Ensure that VAL is initialized (to any value). */
1517 *val = 0;
1518
1519 switch (code)
1520 {
1521 case BIT_AND_EXPR:
1522 /* The mask is constant where there is a known not
1523 set bit, (m1 | m2) & ((v1 | m1) & (v2 | m2)) */
1524 *mask = (r1mask | r2mask) & (r1val | r1mask) & (r2val | r2mask);
1525 *val = r1val & r2val;
1526 break;
1527
1528 case BIT_IOR_EXPR:
1529 /* The mask is constant where there is a known
1530 set bit, (m1 | m2) & ~((v1 & ~m1) | (v2 & ~m2)). */
1531 *mask = wi::bit_and_not (x: r1mask | r2mask,
1532 y: wi::bit_and_not (x: r1val, y: r1mask)
1533 | wi::bit_and_not (x: r2val, y: r2mask));
1534 *val = r1val | r2val;
1535 break;
1536
1537 case BIT_XOR_EXPR:
1538 /* m1 | m2 */
1539 *mask = r1mask | r2mask;
1540 *val = r1val ^ r2val;
1541 break;
1542
1543 case LROTATE_EXPR:
1544 case RROTATE_EXPR:
1545 if (r2mask == 0)
1546 {
1547 widest_int shift = r2val;
1548 if (shift == 0)
1549 {
1550 *mask = r1mask;
1551 *val = r1val;
1552 }
1553 else
1554 {
1555 if (wi::neg_p (x: shift, sgn: r2type_sgn))
1556 {
1557 shift = -shift;
1558 if (code == RROTATE_EXPR)
1559 code = LROTATE_EXPR;
1560 else
1561 code = RROTATE_EXPR;
1562 }
1563 if (code == RROTATE_EXPR)
1564 {
1565 *mask = wi::rrotate (x: r1mask, y: shift, width);
1566 *val = wi::rrotate (x: r1val, y: shift, width);
1567 }
1568 else
1569 {
1570 *mask = wi::lrotate (x: r1mask, y: shift, width);
1571 *val = wi::lrotate (x: r1val, y: shift, width);
1572 }
1573 *mask = wi::ext (x: *mask, offset: width, sgn);
1574 *val = wi::ext (x: *val, offset: width, sgn);
1575 }
1576 }
1577 else if (wi::ltu_p (x: r2val | r2mask, y: width)
1578 && wi::popcount (r2mask) <= 4)
1579 {
1580 widest_int bits[4];
1581 widest_int res_val, res_mask;
1582 widest_int tmp_val, tmp_mask;
1583 widest_int shift = wi::bit_and_not (x: r2val, y: r2mask);
1584 unsigned int bit_count = get_individual_bits (bits, x: r2mask, max: 4);
1585 unsigned int count = (1 << bit_count) - 1;
1586
1587 /* Initialize result to rotate by smallest value of shift. */
1588 if (code == RROTATE_EXPR)
1589 {
1590 res_mask = wi::rrotate (x: r1mask, y: shift, width);
1591 res_val = wi::rrotate (x: r1val, y: shift, width);
1592 }
1593 else
1594 {
1595 res_mask = wi::lrotate (x: r1mask, y: shift, width);
1596 res_val = wi::lrotate (x: r1val, y: shift, width);
1597 }
1598
1599 /* Iterate through the remaining values of shift. */
1600 for (unsigned int i=0; i<count; i++)
1601 {
1602 shift ^= bits[gray_code_bit_flips[i]];
1603 if (code == RROTATE_EXPR)
1604 {
1605 tmp_mask = wi::rrotate (x: r1mask, y: shift, width);
1606 tmp_val = wi::rrotate (x: r1val, y: shift, width);
1607 }
1608 else
1609 {
1610 tmp_mask = wi::lrotate (x: r1mask, y: shift, width);
1611 tmp_val = wi::lrotate (x: r1val, y: shift, width);
1612 }
1613 /* Accumulate the result. */
1614 res_mask |= tmp_mask | (res_val ^ tmp_val);
1615 }
1616 *val = wi::ext (x: wi::bit_and_not (x: res_val, y: res_mask), offset: width, sgn);
1617 *mask = wi::ext (x: res_mask, offset: width, sgn);
1618 }
1619 break;
1620
1621 case LSHIFT_EXPR:
1622 case RSHIFT_EXPR:
1623 /* ??? We can handle partially known shift counts if we know
1624 its sign. That way we can tell that (x << (y | 8)) & 255
1625 is zero. */
1626 if (r2mask == 0)
1627 {
1628 widest_int shift = r2val;
1629 if (shift == 0)
1630 {
1631 *mask = r1mask;
1632 *val = r1val;
1633 }
1634 else
1635 {
1636 if (wi::neg_p (x: shift, sgn: r2type_sgn))
1637 break;
1638 if (code == RSHIFT_EXPR)
1639 {
1640 *mask = wi::rshift (x: wi::ext (x: r1mask, offset: width, sgn), y: shift, sgn);
1641 *val = wi::rshift (x: wi::ext (x: r1val, offset: width, sgn), y: shift, sgn);
1642 }
1643 else
1644 {
1645 *mask = wi::ext (x: r1mask << shift, offset: width, sgn);
1646 *val = wi::ext (x: r1val << shift, offset: width, sgn);
1647 }
1648 }
1649 }
1650 else if (wi::ltu_p (x: r2val | r2mask, y: width))
1651 {
1652 if (wi::popcount (r2mask) <= 4)
1653 {
1654 widest_int bits[4];
1655 widest_int arg_val, arg_mask;
1656 widest_int res_val, res_mask;
1657 widest_int tmp_val, tmp_mask;
1658 widest_int shift = wi::bit_and_not (x: r2val, y: r2mask);
1659 unsigned int bit_count = get_individual_bits (bits, x: r2mask, max: 4);
1660 unsigned int count = (1 << bit_count) - 1;
1661
1662 /* Initialize result to shift by smallest value of shift. */
1663 if (code == RSHIFT_EXPR)
1664 {
1665 arg_mask = wi::ext (x: r1mask, offset: width, sgn);
1666 arg_val = wi::ext (x: r1val, offset: width, sgn);
1667 res_mask = wi::rshift (x: arg_mask, y: shift, sgn);
1668 res_val = wi::rshift (x: arg_val, y: shift, sgn);
1669 }
1670 else
1671 {
1672 arg_mask = r1mask;
1673 arg_val = r1val;
1674 res_mask = arg_mask << shift;
1675 res_val = arg_val << shift;
1676 }
1677
1678 /* Iterate through the remaining values of shift. */
1679 for (unsigned int i=0; i<count; i++)
1680 {
1681 shift ^= bits[gray_code_bit_flips[i]];
1682 if (code == RSHIFT_EXPR)
1683 {
1684 tmp_mask = wi::rshift (x: arg_mask, y: shift, sgn);
1685 tmp_val = wi::rshift (x: arg_val, y: shift, sgn);
1686 }
1687 else
1688 {
1689 tmp_mask = arg_mask << shift;
1690 tmp_val = arg_val << shift;
1691 }
1692 /* Accumulate the result. */
1693 res_mask |= tmp_mask | (res_val ^ tmp_val);
1694 }
1695 res_mask = wi::ext (x: res_mask, offset: width, sgn);
1696 res_val = wi::ext (x: res_val, offset: width, sgn);
1697 *val = wi::bit_and_not (x: res_val, y: res_mask);
1698 *mask = res_mask;
1699 }
1700 else if ((r1val | r1mask) == 0)
1701 {
1702 /* Handle shifts of zero to avoid undefined wi::ctz below. */
1703 *mask = 0;
1704 *val = 0;
1705 }
1706 else if (code == LSHIFT_EXPR)
1707 {
1708 widest_int tmp = wi::mask <widest_int> (width, negate_p: false);
1709 tmp <<= wi::ctz (r1val | r1mask);
1710 tmp <<= wi::bit_and_not (x: r2val, y: r2mask);
1711 *mask = wi::ext (x: tmp, offset: width, sgn);
1712 *val = 0;
1713 }
1714 else if (!wi::neg_p (x: r1val | r1mask, sgn))
1715 {
1716 /* Logical right shift, or zero sign bit. */
1717 widest_int arg = r1val | r1mask;
1718 int lzcount = wi::clz (arg);
1719 if (lzcount)
1720 lzcount -= wi::get_precision (x: arg) - width;
1721 widest_int tmp = wi::mask <widest_int> (width, negate_p: false);
1722 tmp = wi::lrshift (x: tmp, y: lzcount);
1723 tmp = wi::lrshift (x: tmp, y: wi::bit_and_not (x: r2val, y: r2mask));
1724 *mask = wi::ext (x: tmp, offset: width, sgn);
1725 *val = 0;
1726 }
1727 else if (!wi::neg_p (x: r1mask))
1728 {
1729 /* Arithmetic right shift with set sign bit. */
1730 widest_int arg = wi::bit_and_not (x: r1val, y: r1mask);
1731 int sbcount = wi::clrsb (arg);
1732 sbcount -= wi::get_precision (x: arg) - width;
1733 widest_int tmp = wi::mask <widest_int> (width, negate_p: false);
1734 tmp = wi::lrshift (x: tmp, y: sbcount);
1735 tmp = wi::lrshift (x: tmp, y: wi::bit_and_not (x: r2val, y: r2mask));
1736 *mask = wi::sext (x: tmp, offset: width);
1737 tmp = wi::bit_not (x: tmp);
1738 *val = wi::sext (x: tmp, offset: width);
1739 }
1740 }
1741 break;
1742
1743 case PLUS_EXPR:
1744 case POINTER_PLUS_EXPR:
1745 {
1746 /* Do the addition with unknown bits set to zero, to give carry-ins of
1747 zero wherever possible. */
1748 widest_int lo = (wi::bit_and_not (x: r1val, y: r1mask)
1749 + wi::bit_and_not (x: r2val, y: r2mask));
1750 lo = wi::ext (x: lo, offset: width, sgn);
1751 /* Do the addition with unknown bits set to one, to give carry-ins of
1752 one wherever possible. */
1753 widest_int hi = (r1val | r1mask) + (r2val | r2mask);
1754 hi = wi::ext (x: hi, offset: width, sgn);
1755 /* Each bit in the result is known if (a) the corresponding bits in
1756 both inputs are known, and (b) the carry-in to that bit position
1757 is known. We can check condition (b) by seeing if we got the same
1758 result with minimised carries as with maximised carries. */
1759 *mask = r1mask | r2mask | (lo ^ hi);
1760 *mask = wi::ext (x: *mask, offset: width, sgn);
1761 /* It shouldn't matter whether we choose lo or hi here. */
1762 *val = lo;
1763 break;
1764 }
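      /* As an example of the carry test above: adding (val 1, mask 2),
	 i.e. 1 or 3, to the constant 1 gives lo == 2 and hi == 4, so
	 the result mask 2 | 0 | (2 ^ 4) == 6 correctly marks bit 2 as
	 unknown as well (the true sums are 2 and 4).  */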
1765
1766 case MINUS_EXPR:
1767 case POINTER_DIFF_EXPR:
1768 {
1769 /* Subtraction is derived from the addition algorithm above. */
1770 widest_int lo = wi::bit_and_not (x: r1val, y: r1mask) - (r2val | r2mask);
1771 lo = wi::ext (x: lo, offset: width, sgn);
1772 widest_int hi = (r1val | r1mask) - wi::bit_and_not (x: r2val, y: r2mask);
1773 hi = wi::ext (x: hi, offset: width, sgn);
1774 *mask = r1mask | r2mask | (lo ^ hi);
1775 *mask = wi::ext (x: *mask, offset: width, sgn);
1776 *val = lo;
1777 break;
1778 }
1779
1780 case MULT_EXPR:
1781 if (r2mask == 0
1782 && !wi::neg_p (x: r2val, sgn)
1783 && (flag_expensive_optimizations || wi::popcount (r2val) < 8))
1784 bit_value_mult_const (sgn, width, val, mask, rval: r1val, rmask: r1mask, c: r2val);
1785 else if (r1mask == 0
1786 && !wi::neg_p (x: r1val, sgn)
1787 && (flag_expensive_optimizations || wi::popcount (r1val) < 8))
1788 bit_value_mult_const (sgn, width, val, mask, rval: r2val, rmask: r2mask, c: r1val);
1789 else
1790 {
1791 /* Just track trailing zeros in both operands and transfer
1792 them to the other. */
1793 int r1tz = wi::ctz (r1val | r1mask);
1794 int r2tz = wi::ctz (r2val | r2mask);
1795 if (r1tz + r2tz >= width)
1796 {
1797 *mask = 0;
1798 *val = 0;
1799 }
1800 else if (r1tz + r2tz > 0)
1801 {
1802 *mask = wi::ext (x: wi::mask <widest_int> (width: r1tz + r2tz, negate_p: true),
1803 offset: width, sgn);
1804 *val = 0;
1805 }
1806 }
1807 break;
1808
1809 case EQ_EXPR:
1810 case NE_EXPR:
1811 {
1812 widest_int m = r1mask | r2mask;
1813 if (wi::bit_and_not (x: r1val, y: m) != wi::bit_and_not (x: r2val, y: m))
1814 {
1815 *mask = 0;
1816 *val = ((code == EQ_EXPR) ? 0 : 1);
1817 }
1818 else
1819 {
1820 /* We know the result of a comparison is always one or zero. */
1821 *mask = 1;
1822 *val = 0;
1823 }
1824 break;
1825 }
1826
1827 case GE_EXPR:
1828 case GT_EXPR:
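/* Canonicalize > and >= to < and <= by swapping the operands.  */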
1829 swap_p = true;
1830 code = swap_tree_comparison (code);
1831 /* Fall through. */
1832 case LT_EXPR:
1833 case LE_EXPR:
1834 {
1835 widest_int min1, max1, min2, max2;
1836 int minmax, maxmin;
1837
1838 const widest_int &o1val = swap_p ? r2val : r1val;
1839 const widest_int &o1mask = swap_p ? r2mask : r1mask;
1840 const widest_int &o2val = swap_p ? r1val : r2val;
1841 const widest_int &o2mask = swap_p ? r1mask : r2mask;
1842
1843 value_mask_to_min_max (min: &min1, max: &max1, val: o1val, mask: o1mask,
1844 sgn: r1type_sgn, precision: r1type_precision);
1845 value_mask_to_min_max (min: &min2, max: &max2, val: o2val, mask: o2mask,
1846 sgn: r1type_sgn, precision: r1type_precision);
1847
1848 /* For comparisons the signedness is in the comparison operands. */
1849 /* Do a cross comparison of the max/min pairs. */
1850 maxmin = wi::cmp (x: max1, y: min2, sgn: r1type_sgn);
1851 minmax = wi::cmp (x: min1, y: max2, sgn: r1type_sgn);
1852 if (maxmin < (code == LE_EXPR ? 1 : 0)) /* o1 < or <= o2. */
1853 {
1854 *mask = 0;
1855 *val = 1;
1856 }
1857 else if (minmax > (code == LT_EXPR ? -1 : 0)) /* o1 >= or > o2. */
1858 {
1859 *mask = 0;
1860 *val = 0;
1861 }
1862 else if (maxmin == minmax) /* o1 and o2 are equal. */
1863 {
1864 /* This should rarely, if ever, happen, as the comparison would
1865 have been folded during fully constant value folding. */
1866 *mask = 0;
1867 *val = (code == LE_EXPR ? 1 : 0);
1868 }
1869 else
1870 {
1871 /* We know the result of a comparison is always one or zero. */
1872 *mask = 1;
1873 *val = 0;
1874 }
1875 break;
1876 }
1877
1878 case MIN_EXPR:
1879 case MAX_EXPR:
1880 {
1881 widest_int min1, max1, min2, max2;
1882
1883 value_mask_to_min_max (min: &min1, max: &max1, val: r1val, mask: r1mask, sgn, precision: width);
1884 value_mask_to_min_max (min: &min2, max: &max2, val: r2val, mask: r2mask, sgn, precision: width);
1885
1886 if (wi::cmp (x: max1, y: min2, sgn) <= 0) /* r1 is less than r2. */
1887 {
1888 if (code == MIN_EXPR)
1889 {
1890 *mask = r1mask;
1891 *val = r1val;
1892 }
1893 else
1894 {
1895 *mask = r2mask;
1896 *val = r2val;
1897 }
1898 }
1899 else if (wi::cmp (x: min1, y: max2, sgn) >= 0) /* r2 is less than r1. */
1900 {
1901 if (code == MIN_EXPR)
1902 {
1903 *mask = r2mask;
1904 *val = r2val;
1905 }
1906 else
1907 {
1908 *mask = r1mask;
1909 *val = r1val;
1910 }
1911 }
1912 else
1913 {
1914 /* The result is either r1 or r2. */
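/* A bit is unknown if it is unknown in either operand or if the
   two values disagree on it.  */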
1915 *mask = r1mask | r2mask | (r1val ^ r2val);
1916 *val = r1val;
1917 }
1918 break;
1919 }
1920
1921 case TRUNC_MOD_EXPR:
1922 {
1923 widest_int r1max = r1val | r1mask;
1924 widest_int r2max = r2val | r2mask;
1925 if (r2mask == 0)
1926 {
1927 widest_int shift = wi::exact_log2 (r2val);
1928 if (shift != -1)
1929 {
1930 // Handle modulo by a power of 2 as a bitwise and.
1931 widest_int tem_val, tem_mask;
1932 bit_value_binop (code: BIT_AND_EXPR, sgn, width, val: &tem_val, mask: &tem_mask,
1933 r1type_sgn, r1type_precision, r1val, r1mask,
1934 r2type_sgn, r2type_precision,
1935 r2val: r2val - 1, r2mask);
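/* For signed types the bitwise-AND equivalence only holds if R1 is
   known to be non-negative or if the masked result is known to be
   zero (a zero remainder is correct regardless of R1's sign).  */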
1936 if (sgn == UNSIGNED
1937 || !wi::neg_p (x: r1max)
1938 || (tem_mask == 0 && tem_val == 0))
1939 {
1940 *val = tem_val;
1941 *mask = tem_mask;
1942 return;
1943 }
1944 }
1945 }
1946 if (sgn == UNSIGNED
1947 || (!wi::neg_p (x: r1max) && !wi::neg_p (x: r2max)))
1948 {
1949 /* Confirm R2 has some bits set, to avoid division by zero. */
1950 widest_int r2min = wi::bit_and_not (x: r2val, y: r2mask);
1951 if (r2min != 0)
1952 {
1953 /* R1 % R2 is R1 if R1 is always less than R2. */
1954 if (wi::ltu_p (x: r1max, y: r2min))
1955 {
1956 *mask = r1mask;
1957 *val = r1val;
1958 }
1959 else
1960 {
1961 /* R1 % R2 is always less than the maximum of R2. */
1962 unsigned int lzcount = wi::clz (r2max);
1963 unsigned int bits = wi::get_precision (x: r2max) - lzcount;
1964 if (r2max == wi::lshift (x: 1, y: bits))
1965 bits--;
1966 *mask = wi::mask <widest_int> (width: bits, negate_p: false);
1967 *val = 0;
1968 }
1969 }
1970 }
1971 }
1972 break;
1973
1974 case EXACT_DIV_EXPR:
1975 case TRUNC_DIV_EXPR:
1976 {
1977 widest_int r1max = r1val | r1mask;
1978 widest_int r2max = r2val | r2mask;
1979 if (r2mask == 0
1980 && (code == EXACT_DIV_EXPR
1981 || sgn == UNSIGNED
1982 || !wi::neg_p (x: r1max)))
1983 {
1984 widest_int shift = wi::exact_log2 (r2val);
1985 if (shift != -1)
1986 {
1987 // Handle division by a power of 2 as an rshift.
1988 bit_value_binop (code: RSHIFT_EXPR, sgn, width, val, mask,
1989 r1type_sgn, r1type_precision, r1val, r1mask,
1990 r2type_sgn, r2type_precision, r2val: shift, r2mask);
1991 return;
1992 }
1993 }
1994 if (sgn == UNSIGNED
1995 || (!wi::neg_p (x: r1max) && !wi::neg_p (x: r2max)))
1996 {
1997 /* Confirm R2 has some bits set, to avoid division by zero. */
1998 widest_int r2min = wi::bit_and_not (x: r2val, y: r2mask);
1999 if (r2min != 0)
2000 {
2001 /* R1 / R2 is zero if R1 is always less than R2. */
2002 if (wi::ltu_p (x: r1max, y: r2min))
2003 {
2004 *mask = 0;
2005 *val = 0;
2006 }
2007 else
2008 {
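/* Otherwise the quotient is at most R1MAX / R2MIN, so the result
   needs no more bits than that upper bound.  */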
2009 widest_int upper
2010 = wi::udiv_trunc (x: wi::zext (x: r1max, offset: width), y: r2min);
2011 unsigned int lzcount = wi::clz (upper);
2012 unsigned int bits = wi::get_precision (x: upper) - lzcount;
2013 *mask = wi::mask <widest_int> (width: bits, negate_p: false);
2014 *val = 0;
2015 }
2016 }
2017 }
2018 }
2019 break;
2020
2021 default:;
2022 }
2023}
2024
2025/* Return the propagation value when applying the operation CODE to
2026 the value RHS yielding type TYPE. */
2027
2028static ccp_prop_value_t
2029bit_value_unop (enum tree_code code, tree type, tree rhs)
2030{
2031 ccp_prop_value_t rval = get_value_for_expr (expr: rhs, for_bits_p: true);
2032 widest_int value, mask;
2033 ccp_prop_value_t val;
2034
2035 if (rval.lattice_val == UNDEFINED)
2036 return rval;
2037
2038 gcc_assert ((rval.lattice_val == CONSTANT
2039 && TREE_CODE (rval.value) == INTEGER_CST)
2040 || wi::sext (rval.mask, TYPE_PRECISION (TREE_TYPE (rhs))) == -1);
2041 bit_value_unop (code, TYPE_SIGN (type), TYPE_PRECISION (type), val: &value, mask: &mask,
2042 TYPE_SIGN (TREE_TYPE (rhs)), TYPE_PRECISION (TREE_TYPE (rhs)),
2043 rval: value_to_wide_int (val: rval), rmask: rval.mask);
2044 if (wi::sext (x: mask, TYPE_PRECISION (type)) != -1)
2045 {
2046 val.lattice_val = CONSTANT;
2047 val.mask = mask;
2048 /* ??? Delay building trees here. */
2049 val.value = wide_int_to_tree (type, cst: value);
2050 }
2051 else
2052 {
2053 val.lattice_val = VARYING;
2054 val.value = NULL_TREE;
2055 val.mask = -1;
2056 }
2057 return val;
2058}
2059
2060/* Return the propagation value when applying the operation CODE to
2061 the values RHS1 and RHS2 yielding type TYPE. */
2062
2063static ccp_prop_value_t
2064bit_value_binop (enum tree_code code, tree type, tree rhs1, tree rhs2)
2065{
2066 ccp_prop_value_t r1val = get_value_for_expr (expr: rhs1, for_bits_p: true);
2067 ccp_prop_value_t r2val = get_value_for_expr (expr: rhs2, for_bits_p: true);
2068 widest_int value, mask;
2069 ccp_prop_value_t val;
2070
2071 if (r1val.lattice_val == UNDEFINED
2072 || r2val.lattice_val == UNDEFINED)
2073 {
2074 val.lattice_val = VARYING;
2075 val.value = NULL_TREE;
2076 val.mask = -1;
2077 return val;
2078 }
2079
2080 gcc_assert ((r1val.lattice_val == CONSTANT
2081 && TREE_CODE (r1val.value) == INTEGER_CST)
2082 || wi::sext (r1val.mask,
2083 TYPE_PRECISION (TREE_TYPE (rhs1))) == -1);
2084 gcc_assert ((r2val.lattice_val == CONSTANT
2085 && TREE_CODE (r2val.value) == INTEGER_CST)
2086 || wi::sext (r2val.mask,
2087 TYPE_PRECISION (TREE_TYPE (rhs2))) == -1);
2088 bit_value_binop (code, TYPE_SIGN (type), TYPE_PRECISION (type), val: &value, mask: &mask,
2089 TYPE_SIGN (TREE_TYPE (rhs1)), TYPE_PRECISION (TREE_TYPE (rhs1)),
2090 r1val: value_to_wide_int (val: r1val), r1mask: r1val.mask,
2091 TYPE_SIGN (TREE_TYPE (rhs2)), TYPE_PRECISION (TREE_TYPE (rhs2)),
2092 r2val: value_to_wide_int (val: r2val), r2mask: r2val.mask);
2093
2094 /* The square of any integer has bit 1 clear: (x * x) & 2 == 0. */
2095 if (code == MULT_EXPR && rhs1 == rhs2 && TYPE_PRECISION (type) > 1)
2096 {
2097 widest_int m = 2;
2098 if (wi::sext (x: mask, TYPE_PRECISION (type)) != -1)
2099 value = wi::bit_and_not (x: value, y: m);
2100 else
2101 value = 0;
2102 mask = wi::bit_and_not (x: mask, y: m);
2103 }
2104
2105 if (wi::sext (x: mask, TYPE_PRECISION (type)) != -1)
2106 {
2107 val.lattice_val = CONSTANT;
2108 val.mask = mask;
2109 /* ??? Delay building trees here. */
2110 val.value = wide_int_to_tree (type, cst: value);
2111 }
2112 else
2113 {
2114 val.lattice_val = VARYING;
2115 val.value = NULL_TREE;
2116 val.mask = -1;
2117 }
2118 return val;
2119}
2120
2121/* Return the propagation value for __builtin_assume_aligned
2122 and functions with assume_aligned or alloc_aligned attribute.
2123 For __builtin_assume_aligned, ATTR is NULL_TREE,
2124 for assume_aligned attribute ATTR is non-NULL and ALLOC_ALIGNED
2125 is false, for alloc_aligned attribute ATTR is non-NULL and
2126 ALLOC_ALIGNED is true. */
2127
2128static ccp_prop_value_t
2129bit_value_assume_aligned (gimple *stmt, tree attr, ccp_prop_value_t ptrval,
2130 bool alloc_aligned)
2131{
2132 tree align, misalign = NULL_TREE, type;
2133 unsigned HOST_WIDE_INT aligni, misaligni = 0;
2134 ccp_prop_value_t alignval;
2135 widest_int value, mask;
2136 ccp_prop_value_t val;
2137
2138 if (attr == NULL_TREE)
2139 {
2140 tree ptr = gimple_call_arg (gs: stmt, index: 0);
2141 type = TREE_TYPE (ptr);
2142 ptrval = get_value_for_expr (expr: ptr, for_bits_p: true);
2143 }
2144 else
2145 {
2146 tree lhs = gimple_call_lhs (gs: stmt);
2147 type = TREE_TYPE (lhs);
2148 }
2149
2150 if (ptrval.lattice_val == UNDEFINED)
2151 return ptrval;
2152 gcc_assert ((ptrval.lattice_val == CONSTANT
2153 && TREE_CODE (ptrval.value) == INTEGER_CST)
2154 || wi::sext (ptrval.mask, TYPE_PRECISION (type)) == -1);
2155 if (attr == NULL_TREE)
2156 {
2157 /* Get aligni and misaligni from __builtin_assume_aligned. */
2158 align = gimple_call_arg (gs: stmt, index: 1);
2159 if (!tree_fits_uhwi_p (align))
2160 return ptrval;
2161 aligni = tree_to_uhwi (align);
2162 if (gimple_call_num_args (gs: stmt) > 2)
2163 {
2164 misalign = gimple_call_arg (gs: stmt, index: 2);
2165 if (!tree_fits_uhwi_p (misalign))
2166 return ptrval;
2167 misaligni = tree_to_uhwi (misalign);
2168 }
2169 }
2170 else
2171 {
2172 /* Get aligni and misaligni from assume_aligned or
2173 alloc_align attributes. */
2174 if (TREE_VALUE (attr) == NULL_TREE)
2175 return ptrval;
2176 attr = TREE_VALUE (attr);
2177 align = TREE_VALUE (attr);
2178 if (!tree_fits_uhwi_p (align))
2179 return ptrval;
2180 aligni = tree_to_uhwi (align);
2181 if (alloc_aligned)
2182 {
2183 if (aligni == 0 || aligni > gimple_call_num_args (gs: stmt))
2184 return ptrval;
2185 align = gimple_call_arg (gs: stmt, index: aligni - 1);
2186 if (!tree_fits_uhwi_p (align))
2187 return ptrval;
2188 aligni = tree_to_uhwi (align);
2189 }
2190 else if (TREE_CHAIN (attr) && TREE_VALUE (TREE_CHAIN (attr)))
2191 {
2192 misalign = TREE_VALUE (TREE_CHAIN (attr));
2193 if (!tree_fits_uhwi_p (misalign))
2194 return ptrval;
2195 misaligni = tree_to_uhwi (misalign);
2196 }
2197 }
2198 if (aligni <= 1 || (aligni & (aligni - 1)) != 0 || misaligni >= aligni)
2199 return ptrval;
2200
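/* -ALIGNI is an all-ones mask with the low log2 (ALIGNI) bits clear;
   ANDing the pointer value with it models the alignment guarantee,
   and the misalignment is ORed back in below.  */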
2201 align = build_int_cst_type (type, -aligni);
2202 alignval = get_value_for_expr (expr: align, for_bits_p: true);
2203 bit_value_binop (code: BIT_AND_EXPR, TYPE_SIGN (type), TYPE_PRECISION (type), val: &value, mask: &mask,
2204 TYPE_SIGN (type), TYPE_PRECISION (type), r1val: value_to_wide_int (val: ptrval), r1mask: ptrval.mask,
2205 TYPE_SIGN (type), TYPE_PRECISION (type), r2val: value_to_wide_int (val: alignval), r2mask: alignval.mask);
2206
2207 if (wi::sext (x: mask, TYPE_PRECISION (type)) != -1)
2208 {
2209 val.lattice_val = CONSTANT;
2210 val.mask = mask;
2211 gcc_assert ((mask.to_uhwi () & (aligni - 1)) == 0);
2212 gcc_assert ((value.to_uhwi () & (aligni - 1)) == 0);
2213 value |= misaligni;
2214 /* ??? Delay building trees here. */
2215 val.value = wide_int_to_tree (type, cst: value);
2216 }
2217 else
2218 {
2219 val.lattice_val = VARYING;
2220 val.value = NULL_TREE;
2221 val.mask = -1;
2222 }
2223 return val;
2224}
2225
2226/* Evaluate statement STMT.
2227 Valid only for assignments, calls, conditionals, and switches. */
2228
2229static ccp_prop_value_t
2230evaluate_stmt (gimple *stmt)
2231{
2232 ccp_prop_value_t val;
2233 tree simplified = NULL_TREE;
2234 ccp_lattice_t likelyvalue = likely_value (stmt);
2235 bool is_constant = false;
2236 unsigned int align;
2237 bool ignore_return_flags = false;
2238
2239 if (dump_file && (dump_flags & TDF_DETAILS))
2240 {
2241 fprintf (stream: dump_file, format: "which is likely ");
2242 switch (likelyvalue)
2243 {
2244 case CONSTANT:
2245 fprintf (stream: dump_file, format: "CONSTANT");
2246 break;
2247 case UNDEFINED:
2248 fprintf (stream: dump_file, format: "UNDEFINED");
2249 break;
2250 case VARYING:
2251 fprintf (stream: dump_file, format: "VARYING");
2252 break;
2253 default:;
2254 }
2255 fprintf (stream: dump_file, format: "\n");
2256 }
2257
2258 /* If the statement is likely to have a CONSTANT result, then try
2259 to fold the statement to determine the constant value. */
2260 /* FIXME. This is the only place that we call ccp_fold.
2261 Since likely_value never returns CONSTANT for calls, we will
2262 not attempt to fold them, including builtins that might profit from folding. */
2263 if (likelyvalue == CONSTANT)
2264 {
2265 fold_defer_overflow_warnings ();
2266 simplified = ccp_fold (stmt);
2267 if (simplified
2268 && TREE_CODE (simplified) == SSA_NAME)
2269 {
2270 /* We may not use values of something that may be simulated again,
2271 see valueize_op_1. */
2272 if (SSA_NAME_IS_DEFAULT_DEF (simplified)
2273 || ! prop_simulate_again_p (SSA_NAME_DEF_STMT (simplified)))
2274 {
2275 ccp_prop_value_t *val = get_value (var: simplified);
2276 if (val && val->lattice_val != VARYING)
2277 {
2278 fold_undefer_overflow_warnings (true, stmt, 0);
2279 return *val;
2280 }
2281 }
2282 else
2283 /* We may also not place a non-valueized copy in the lattice
2284 as that might become stale if we never re-visit this stmt. */
2285 simplified = NULL_TREE;
2286 }
2287 is_constant = simplified && is_gimple_min_invariant (simplified);
2288 fold_undefer_overflow_warnings (is_constant, stmt, 0);
2289 if (is_constant)
2290 {
2291 /* The statement produced a constant value. */
2292 val.lattice_val = CONSTANT;
2293 val.value = simplified;
2294 val.mask = 0;
2295 return val;
2296 }
2297 }
2298 /* If the statement is likely to have a VARYING result, then do not
2299 bother folding the statement. */
2300 else if (likelyvalue == VARYING)
2301 {
2302 enum gimple_code code = gimple_code (g: stmt);
2303 if (code == GIMPLE_ASSIGN)
2304 {
2305 enum tree_code subcode = gimple_assign_rhs_code (gs: stmt);
2306
2307 /* Other cases cannot satisfy is_gimple_min_invariant
2308 without folding. */
2309 if (get_gimple_rhs_class (code: subcode) == GIMPLE_SINGLE_RHS)
2310 simplified = gimple_assign_rhs1 (gs: stmt);
2311 }
2312 else if (code == GIMPLE_SWITCH)
2313 simplified = gimple_switch_index (gs: as_a <gswitch *> (p: stmt));
2314 else
2315 /* These cannot satisfy is_gimple_min_invariant without folding. */
2316 gcc_assert (code == GIMPLE_CALL || code == GIMPLE_COND);
2317 is_constant = simplified && is_gimple_min_invariant (simplified);
2318 if (is_constant)
2319 {
2320 /* The statement produced a constant value. */
2321 val.lattice_val = CONSTANT;
2322 val.value = simplified;
2323 val.mask = 0;
2324 }
2325 }
2326 /* If the statement result is likely UNDEFINED, make it so. */
2327 else if (likelyvalue == UNDEFINED)
2328 {
2329 val.lattice_val = UNDEFINED;
2330 val.value = NULL_TREE;
2331 val.mask = 0;
2332 return val;
2333 }
2334
2335 /* Resort to simplification for bitwise tracking. */
2336 if (flag_tree_bit_ccp
2337 && (likelyvalue == CONSTANT || is_gimple_call (gs: stmt)
2338 || (gimple_assign_single_p (gs: stmt)
2339 && gimple_assign_rhs_code (gs: stmt) == ADDR_EXPR))
2340 && !is_constant)
2341 {
2342 enum gimple_code code = gimple_code (g: stmt);
2343 val.lattice_val = VARYING;
2344 val.value = NULL_TREE;
2345 val.mask = -1;
2346 if (code == GIMPLE_ASSIGN)
2347 {
2348 enum tree_code subcode = gimple_assign_rhs_code (gs: stmt);
2349 tree rhs1 = gimple_assign_rhs1 (gs: stmt);
2350 tree lhs = gimple_assign_lhs (gs: stmt);
2351 if ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
2352 || POINTER_TYPE_P (TREE_TYPE (lhs)))
2353 && (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
2354 || POINTER_TYPE_P (TREE_TYPE (rhs1))))
2355 switch (get_gimple_rhs_class (code: subcode))
2356 {
2357 case GIMPLE_SINGLE_RHS:
2358 val = get_value_for_expr (expr: rhs1, for_bits_p: true);
2359 break;
2360
2361 case GIMPLE_UNARY_RHS:
2362 val = bit_value_unop (code: subcode, TREE_TYPE (lhs), rhs: rhs1);
2363 break;
2364
2365 case GIMPLE_BINARY_RHS:
2366 val = bit_value_binop (code: subcode, TREE_TYPE (lhs), rhs1,
2367 rhs2: gimple_assign_rhs2 (gs: stmt));
2368 break;
2369
2370 default:;
2371 }
2372 }
2373 else if (code == GIMPLE_COND)
2374 {
2375 enum tree_code code = gimple_cond_code (gs: stmt);
2376 tree rhs1 = gimple_cond_lhs (gs: stmt);
2377 tree rhs2 = gimple_cond_rhs (gs: stmt);
2378 if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
2379 || POINTER_TYPE_P (TREE_TYPE (rhs1)))
2380 val = bit_value_binop (code, TREE_TYPE (rhs1), rhs1, rhs2);
2381 }
2382 else if (gimple_call_builtin_p (stmt, BUILT_IN_NORMAL))
2383 {
2384 tree fndecl = gimple_call_fndecl (gs: stmt);
2385 switch (DECL_FUNCTION_CODE (decl: fndecl))
2386 {
2387 case BUILT_IN_MALLOC:
2388 case BUILT_IN_REALLOC:
2389 case BUILT_IN_GOMP_REALLOC:
2390 case BUILT_IN_CALLOC:
2391 case BUILT_IN_STRDUP:
2392 case BUILT_IN_STRNDUP:
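/* Memory returned by these allocators is assumed to be aligned to
   MALLOC_ABI_ALIGNMENT (in bits), so the corresponding low bits of
   the returned pointer are known to be zero.  */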
2393 val.lattice_val = CONSTANT;
2394 val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
2395 val.mask = ~((HOST_WIDE_INT) MALLOC_ABI_ALIGNMENT
2396 / BITS_PER_UNIT - 1);
2397 break;
2398
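/* __builtin_alloca returns BIGGEST_ALIGNMENT-aligned memory, while the
   _with_align variants pass the requested alignment in bits as their
   second argument.  */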
2399 CASE_BUILT_IN_ALLOCA:
2400 align = (DECL_FUNCTION_CODE (decl: fndecl) == BUILT_IN_ALLOCA
2401 ? BIGGEST_ALIGNMENT
2402 : TREE_INT_CST_LOW (gimple_call_arg (stmt, 1)));
2403 val.lattice_val = CONSTANT;
2404 val.value = build_int_cst (TREE_TYPE (gimple_get_lhs (stmt)), 0);
2405 val.mask = ~((HOST_WIDE_INT) align / BITS_PER_UNIT - 1);
2406 break;
2407
2408 case BUILT_IN_ASSUME_ALIGNED:
2409 val = bit_value_assume_aligned (stmt, NULL_TREE, ptrval: val, alloc_aligned: false);
2410 ignore_return_flags = true;
2411 break;
2412
2413 case BUILT_IN_ALIGNED_ALLOC:
2414 case BUILT_IN_GOMP_ALLOC:
2415 {
2416 tree align = get_constant_value (var: gimple_call_arg (gs: stmt, index: 0));
2417 if (align
2418 && tree_fits_uhwi_p (align))
2419 {
2420 unsigned HOST_WIDE_INT aligni = tree_to_uhwi (align);
2421 if (aligni > 1
2422 /* align must be a power of two.  */
2423 && (aligni & (aligni - 1)) == 0)
2424 {
2425 val.lattice_val = CONSTANT;
2426 val.value = build_int_cst (ptr_type_node, 0);
2427 val.mask = -aligni;
2428 }
2429 }
2430 break;
2431 }
2432
2433 case BUILT_IN_BSWAP16:
2434 case BUILT_IN_BSWAP32:
2435 case BUILT_IN_BSWAP64:
2436 case BUILT_IN_BSWAP128:
2437 val = get_value_for_expr (expr: gimple_call_arg (gs: stmt, index: 0), for_bits_p: true);
2438 if (val.lattice_val == UNDEFINED)
2439 break;
2440 else if (val.lattice_val == CONSTANT
2441 && val.value
2442 && TREE_CODE (val.value) == INTEGER_CST)
2443 {
2444 tree type = TREE_TYPE (gimple_call_lhs (stmt));
2445 int prec = TYPE_PRECISION (type);
2446 wide_int wval = wi::to_wide (t: val.value);
2447 val.value
2448 = wide_int_to_tree (type,
2449 cst: wi::bswap (x: wide_int::from (x: wval, precision: prec,
2450 sgn: UNSIGNED)));
2451 val.mask
2452 = widest_int::from (x: wi::bswap (x: wide_int::from (x: val.mask,
2453 precision: prec,
2454 sgn: UNSIGNED)),
2455 sgn: UNSIGNED);
2456 if (wi::sext (x: val.mask, offset: prec) != -1)
2457 break;
2458 }
2459 val.lattice_val = VARYING;
2460 val.value = NULL_TREE;
2461 val.mask = -1;
2462 break;
2463
2464 default:;
2465 }
2466 }
2467 if (is_gimple_call (gs: stmt) && gimple_call_lhs (gs: stmt))
2468 {
2469 tree fntype = gimple_call_fntype (gs: stmt);
2470 if (fntype)
2471 {
2472 tree attrs = lookup_attribute (attr_name: "assume_aligned",
2473 TYPE_ATTRIBUTES (fntype));
2474 if (attrs)
2475 val = bit_value_assume_aligned (stmt, attr: attrs, ptrval: val, alloc_aligned: false);
2476 attrs = lookup_attribute (attr_name: "alloc_align",
2477 TYPE_ATTRIBUTES (fntype));
2478 if (attrs)
2479 val = bit_value_assume_aligned (stmt, attr: attrs, ptrval: val, alloc_aligned: true);
2480 }
2481 int flags = ignore_return_flags
2482 ? 0 : gimple_call_return_flags (as_a <gcall *> (p: stmt));
2483 if (flags & ERF_RETURNS_ARG
2484 && (flags & ERF_RETURN_ARG_MASK) < gimple_call_num_args (gs: stmt))
2485 {
2486 val = get_value_for_expr
2487 (expr: gimple_call_arg (gs: stmt,
2488 index: flags & ERF_RETURN_ARG_MASK), for_bits_p: true);
2489 }
2490 }
2491 is_constant = (val.lattice_val == CONSTANT);
2492 }
2493
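/* Refine the result with any nonzero-bits information recorded for the
   LHS: bits known to be zero there can be cleared from both the value
   and the mask.  */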
2494 tree lhs = gimple_get_lhs (stmt);
2495 if (flag_tree_bit_ccp
2496 && lhs && TREE_CODE (lhs) == SSA_NAME && !VECTOR_TYPE_P (TREE_TYPE (lhs))
2497 && ((is_constant && TREE_CODE (val.value) == INTEGER_CST)
2498 || !is_constant))
2499 {
2500 tree lhs = gimple_get_lhs (stmt);
2501 wide_int nonzero_bits = get_nonzero_bits (lhs);
2502 if (nonzero_bits != -1)
2503 {
2504 if (!is_constant)
2505 {
2506 val.lattice_val = CONSTANT;
2507 val.value = build_zero_cst (TREE_TYPE (lhs));
2508 val.mask = extend_mask (nonzero_bits, TYPE_SIGN (TREE_TYPE (lhs)));
2509 is_constant = true;
2510 }
2511 else
2512 {
2513 if (wi::bit_and_not (x: wi::to_wide (t: val.value), y: nonzero_bits) != 0)
2514 val.value = wide_int_to_tree (TREE_TYPE (lhs),
2515 cst: nonzero_bits
2516 & wi::to_wide (t: val.value));
2517 if (nonzero_bits == 0)
2518 val.mask = 0;
2519 else
2520 val.mask = val.mask & extend_mask (nonzero_bits,
2521 TYPE_SIGN (TREE_TYPE (lhs)));
2522 }
2523 }
2524 }
2525
2526 /* The statement produced a nonconstant value. */
2527 if (!is_constant)
2528 {
2529 /* The statement produced a copy. */
2530 if (simplified && TREE_CODE (simplified) == SSA_NAME
2531 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (simplified))
2532 {
2533 val.lattice_val = CONSTANT;
2534 val.value = simplified;
2535 val.mask = -1;
2536 }
2537 /* The statement is VARYING. */
2538 else
2539 {
2540 val.lattice_val = VARYING;
2541 val.value = NULL_TREE;
2542 val.mask = -1;
2543 }
2544 }
2545
2546 return val;
2547}
2548
2549typedef hash_table<nofree_ptr_hash<gimple> > gimple_htab;
2550
2551/* Given a BUILT_IN_STACK_SAVE value SAVED_VAL, insert a clobber of VAR before
2552 each matching BUILT_IN_STACK_RESTORE. Mark visited phis in VISITED. */
2553
2554static void
2555insert_clobber_before_stack_restore (tree saved_val, tree var,
2556 gimple_htab **visited)
2557{
2558 gimple *stmt;
2559 gassign *clobber_stmt;
2560 tree clobber;
2561 imm_use_iterator iter;
2562 gimple_stmt_iterator i;
2563 gimple **slot;
2564
2565 FOR_EACH_IMM_USE_STMT (stmt, iter, saved_val)
2566 if (gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE))
2567 {
2568 clobber = build_clobber (TREE_TYPE (var), CLOBBER_STORAGE_END);
2569 clobber_stmt = gimple_build_assign (var, clobber);
2570 /* Manually update the vdef/vuse here. */
2571 gimple_set_vuse (g: clobber_stmt, vuse: gimple_vuse (g: stmt));
2572 gimple_set_vdef (g: clobber_stmt, vdef: make_ssa_name (var: gimple_vop (cfun)));
2573 gimple_set_vuse (g: stmt, vuse: gimple_vdef (g: clobber_stmt));
2574 SSA_NAME_DEF_STMT (gimple_vdef (clobber_stmt)) = clobber_stmt;
2575 update_stmt (s: stmt);
2576 i = gsi_for_stmt (stmt);
2577 gsi_insert_before (&i, clobber_stmt, GSI_SAME_STMT);
2578 }
2579 else if (gimple_code (g: stmt) == GIMPLE_PHI)
2580 {
2581 if (!*visited)
2582 *visited = new gimple_htab (10);
2583
2584 slot = (*visited)->find_slot (value: stmt, insert: INSERT);
2585 if (*slot != NULL)
2586 continue;
2587
2588 *slot = stmt;
2589 insert_clobber_before_stack_restore (saved_val: gimple_phi_result (gs: stmt), var,
2590 visited);
2591 }
2592 else if (gimple_assign_ssa_name_copy_p (stmt))
2593 insert_clobber_before_stack_restore (saved_val: gimple_assign_lhs (gs: stmt), var,
2594 visited);
2595}
2596
2597/* Advance the iterator to the previous non-debug gimple statement in the same
2598 or dominating basic block. */
2599
2600static inline void
2601gsi_prev_dom_bb_nondebug (gimple_stmt_iterator *i)
2602{
2603 basic_block dom;
2604
2605 gsi_prev_nondebug (i);
2606 while (gsi_end_p (i: *i))
2607 {
2608 dom = get_immediate_dominator (CDI_DOMINATORS, gsi_bb (i: *i));
2609 if (dom == NULL || dom == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2610 return;
2611
2612 *i = gsi_last_bb (bb: dom);
2613 }
2614}
2615
2616/* Find a BUILT_IN_STACK_SAVE dominating gsi_stmt (I), and insert
2617 a clobber of VAR before each matching BUILT_IN_STACK_RESTORE.
2618
2619 It is possible that BUILT_IN_STACK_SAVE cannot be found in a dominator when
2620 a previous pass (such as DOM) duplicated it along multiple paths to a BB.
2621 In that case the function gives up without inserting the clobbers. */
2622
2623static void
2624insert_clobbers_for_var (gimple_stmt_iterator i, tree var)
2625{
2626 gimple *stmt;
2627 tree saved_val;
2628 gimple_htab *visited = NULL;
2629
2630 for (; !gsi_end_p (i); gsi_prev_dom_bb_nondebug (i: &i))
2631 {
2632 stmt = gsi_stmt (i);
2633
2634 if (!gimple_call_builtin_p (stmt, BUILT_IN_STACK_SAVE))
2635 continue;
2636
2637 saved_val = gimple_call_lhs (gs: stmt);
2638 if (saved_val == NULL_TREE)
2639 continue;
2640
2641 insert_clobber_before_stack_restore (saved_val, var, visited: &visited);
2642 break;
2643 }
2644
2645 delete visited;
2646}
2647
2648 /* Detect a __builtin_alloca_with_align with a constant size argument.
2649 If found, declare a fixed-size array to replace it and return the
2650 array's address; otherwise return NULL_TREE. */
2651
2652static tree
2653fold_builtin_alloca_with_align (gimple *stmt)
2654{
2655 unsigned HOST_WIDE_INT size, threshold, n_elem;
2656 tree lhs, arg, block, var, elem_type, array_type;
2657
2658 /* Get lhs. */
2659 lhs = gimple_call_lhs (gs: stmt);
2660 if (lhs == NULL_TREE)
2661 return NULL_TREE;
2662
2663 /* Detect constant argument. */
2664 arg = get_constant_value (var: gimple_call_arg (gs: stmt, index: 0));
2665 if (arg == NULL_TREE
2666 || TREE_CODE (arg) != INTEGER_CST
2667 || !tree_fits_uhwi_p (arg))
2668 return NULL_TREE;
2669
2670 size = tree_to_uhwi (arg);
2671
2672 /* Heuristic: don't fold large allocas. */
2673 threshold = (unsigned HOST_WIDE_INT)param_large_stack_frame;
2674 /* In case the alloca is located at function entry, it has the same lifetime
2675 as a declared array, so we allow a larger size. */
2676 block = gimple_block (g: stmt);
2677 if (!(cfun->after_inlining
2678 && block
2679 && TREE_CODE (BLOCK_SUPERCONTEXT (block)) == FUNCTION_DECL))
2680 threshold /= 10;
2681 if (size > threshold)
2682 return NULL_TREE;
2683
2684 /* We have to be able to move points-to info. We used to assert
2685 that we can, but IPA PTA might end up with two UIDs here,
2686 as it might need to handle more than one instance being
2687 live at the same time. Instead of trying to detect this case
2688 (using the first UID would be OK), just give up for now. */
2689 struct ptr_info_def *pi = SSA_NAME_PTR_INFO (lhs);
2690 unsigned uid = 0;
2691 if (pi != NULL
2692 && !pi->pt.anything
2693 && !pt_solution_singleton_or_null_p (&pi->pt, &uid))
2694 return NULL_TREE;
2695
2696 /* Declare array. */
2697 elem_type = build_nonstandard_integer_type (BITS_PER_UNIT, 1);
2698 n_elem = size * 8 / BITS_PER_UNIT;
2699 array_type = build_array_type_nelts (elem_type, n_elem);
2700
2701 if (tree ssa_name = SSA_NAME_IDENTIFIER (lhs))
2702 {
2703 /* Give the temporary a name derived from the name of the VLA
2704 declaration so it can be referenced in diagnostics. */
2705 const char *name = IDENTIFIER_POINTER (ssa_name);
2706 var = create_tmp_var (array_type, name);
2707 }
2708 else
2709 var = create_tmp_var (array_type);
2710
2711 if (gimple *lhsdef = SSA_NAME_DEF_STMT (lhs))
2712 {
2713 /* Set the temporary's location to that of the VLA declaration
2714 so it can be pointed to in diagnostics. */
2715 location_t loc = gimple_location (g: lhsdef);
2716 DECL_SOURCE_LOCATION (var) = loc;
2717 }
2718
2719 SET_DECL_ALIGN (var, TREE_INT_CST_LOW (gimple_call_arg (stmt, 1)));
2720 if (uid != 0)
2721 SET_DECL_PT_UID (var, uid);
2722
2723 /* Fold alloca to the address of the array. */
2724 return fold_convert (TREE_TYPE (lhs), build_fold_addr_expr (var));
2725}
2726
2727/* Fold the stmt at *GSI with CCP specific information that propagating
2728 and regular folding does not catch. */
2729
2730bool
2731ccp_folder::fold_stmt (gimple_stmt_iterator *gsi)
2732{
2733 gimple *stmt = gsi_stmt (i: *gsi);
2734
2735 switch (gimple_code (g: stmt))
2736 {
2737 case GIMPLE_COND:
2738 {
2739 gcond *cond_stmt = as_a <gcond *> (p: stmt);
2740 ccp_prop_value_t val;
2741 /* Statement evaluation will handle type mismatches in constants
2742 more gracefully than the final propagation. This allows us to
2743 fold more conditionals here. */
2744 val = evaluate_stmt (stmt);
2745 if (val.lattice_val != CONSTANT
2746 || val.mask != 0)
2747 return false;
2748
2749 if (dump_file)
2750 {
2751 fprintf (stream: dump_file, format: "Folding predicate ");
2752 print_gimple_expr (dump_file, stmt, 0);
2753 fprintf (stream: dump_file, format: " to ");
2754 print_generic_expr (dump_file, val.value);
2755 fprintf (stream: dump_file, format: "\n");
2756 }
2757
2758 if (integer_zerop (val.value))
2759 gimple_cond_make_false (gs: cond_stmt);
2760 else
2761 gimple_cond_make_true (gs: cond_stmt);
2762
2763 return true;
2764 }
2765
2766 case GIMPLE_CALL:
2767 {
2768 tree lhs = gimple_call_lhs (gs: stmt);
2769 int flags = gimple_call_flags (stmt);
2770 tree val;
2771 tree argt;
2772 bool changed = false;
2773 unsigned i;
2774
2775 /* If the call was folded into a constant make sure it goes
2776 away even if we cannot propagate into all uses because of
2777 type issues. */
2778 if (lhs
2779 && TREE_CODE (lhs) == SSA_NAME
2780 && (val = get_constant_value (var: lhs))
2781 /* Don't optimize away calls that have side-effects. */
2782 && (flags & (ECF_CONST|ECF_PURE)) != 0
2783 && (flags & ECF_LOOPING_CONST_OR_PURE) == 0)
2784 {
2785 tree new_rhs = unshare_expr (val);
2786 if (!useless_type_conversion_p (TREE_TYPE (lhs),
2787 TREE_TYPE (new_rhs)))
2788 new_rhs = fold_convert (TREE_TYPE (lhs), new_rhs);
2789 gimplify_and_update_call_from_tree (gsi, new_rhs);
2790 return true;
2791 }
2792
2793 /* Internal calls provide no argument types, so the extra laxity
2794 for normal calls does not apply. */
2795 if (gimple_call_internal_p (gs: stmt))
2796 return false;
2797
2798 /* The heuristic of fold_builtin_alloca_with_align differs before and
2799 after inlining, so we don't require the arg to have been replaced with a
2800 constant for folding, just that it is known to be constant. */
2801 if (gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN)
2802 || gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN_AND_MAX))
2803 {
2804 tree new_rhs = fold_builtin_alloca_with_align (stmt);
2805 if (new_rhs)
2806 {
2807 gimplify_and_update_call_from_tree (gsi, new_rhs);
2808 tree var = TREE_OPERAND (TREE_OPERAND (new_rhs, 0),0);
2809 insert_clobbers_for_var (i: *gsi, var);
2810 return true;
2811 }
2812 }
2813
2814 /* If there's no extra info from an assume_aligned call,
2815 drop it so it doesn't act as an otherwise useless dataflow
2816 barrier. */
2817 if (gimple_call_builtin_p (stmt, BUILT_IN_ASSUME_ALIGNED))
2818 {
2819 tree ptr = gimple_call_arg (gs: stmt, index: 0);
2820 ccp_prop_value_t ptrval = get_value_for_expr (expr: ptr, for_bits_p: true);
2821 if (ptrval.lattice_val == CONSTANT
2822 && TREE_CODE (ptrval.value) == INTEGER_CST
2823 && ptrval.mask != 0)
2824 {
2825 ccp_prop_value_t val
2826 = bit_value_assume_aligned (stmt, NULL_TREE, ptrval, alloc_aligned: false);
2827 unsigned int ptralign = least_bit_hwi (x: ptrval.mask.to_uhwi ());
2828 unsigned int align = least_bit_hwi (x: val.mask.to_uhwi ());
2829 if (ptralign == align
2830 && ((TREE_INT_CST_LOW (ptrval.value) & (align - 1))
2831 == (TREE_INT_CST_LOW (val.value) & (align - 1))))
2832 {
2833 replace_call_with_value (gsi, ptr);
2834 return true;
2835 }
2836 }
2837 }
2838
2839 /* Propagate into the call arguments. Compared to replace_uses_in
2840 this can use the argument slot types for type verification
2841 instead of the current argument type. We also can safely
2842 drop qualifiers here as we are dealing with constants anyway. */
2843 argt = TYPE_ARG_TYPES (gimple_call_fntype (stmt));
2844 for (i = 0; i < gimple_call_num_args (gs: stmt) && argt;
2845 ++i, argt = TREE_CHAIN (argt))
2846 {
2847 tree arg = gimple_call_arg (gs: stmt, index: i);
2848 if (TREE_CODE (arg) == SSA_NAME
2849 && (val = get_constant_value (var: arg))
2850 && useless_type_conversion_p
2851 (TYPE_MAIN_VARIANT (TREE_VALUE (argt)),
2852 TYPE_MAIN_VARIANT (TREE_TYPE (val))))
2853 {
2854 gimple_call_set_arg (gs: stmt, index: i, arg: unshare_expr (val));
2855 changed = true;
2856 }
2857 }
2858
2859 return changed;
2860 }
2861
2862 case GIMPLE_ASSIGN:
2863 {
2864 tree lhs = gimple_assign_lhs (gs: stmt);
2865 tree val;
2866
2867 /* If we have a load that turned out to be constant replace it
2868 as we cannot propagate into all uses in all cases. */
2869 if (gimple_assign_single_p (gs: stmt)
2870 && TREE_CODE (lhs) == SSA_NAME
2871 && (val = get_constant_value (var: lhs)))
2872 {
2873 tree rhs = unshare_expr (val);
2874 if (!useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (rhs)))
2875 rhs = fold_build1 (VIEW_CONVERT_EXPR, TREE_TYPE (lhs), rhs);
2876 gimple_assign_set_rhs_from_tree (gsi, rhs);
2877 return true;
2878 }
2879
2880 return false;
2881 }
2882
2883 default:
2884 return false;
2885 }
2886}
2887
2888/* Visit the assignment statement STMT. Set the value of its LHS to the
2889 value computed by the RHS and store LHS in *OUTPUT_P. If STMT
2890 creates virtual definitions, set the value of each new name to that
2891 of the RHS (if we can derive a constant out of the RHS).
2892 Value-returning call statements also perform an assignment, and
2893 are handled here. */
2894
2895static enum ssa_prop_result
2896visit_assignment (gimple *stmt, tree *output_p)
2897{
2898 ccp_prop_value_t val;
2899 enum ssa_prop_result retval = SSA_PROP_NOT_INTERESTING;
2900
2901 tree lhs = gimple_get_lhs (stmt);
2902 if (TREE_CODE (lhs) == SSA_NAME)
2903 {
2904 /* Evaluate the statement, which could be
2905 either a GIMPLE_ASSIGN or a GIMPLE_CALL. */
2906 val = evaluate_stmt (stmt);
2907
2908 /* If STMT is an assignment to an SSA_NAME, we only have one
2909 value to set. */
2910 if (set_lattice_value (var: lhs, new_val: &val))
2911 {
2912 *output_p = lhs;
2913 if (val.lattice_val == VARYING)
2914 retval = SSA_PROP_VARYING;
2915 else
2916 retval = SSA_PROP_INTERESTING;
2917 }
2918 }
2919
2920 return retval;
2921}
2922
2923
2924/* Visit the conditional statement STMT. Return SSA_PROP_INTERESTING
2925 if it can determine which edge will be taken. Otherwise, return
2926 SSA_PROP_VARYING. */
2927
2928static enum ssa_prop_result
2929visit_cond_stmt (gimple *stmt, edge *taken_edge_p)
2930{
2931 ccp_prop_value_t val;
2932 basic_block block;
2933
2934 block = gimple_bb (g: stmt);
2935 val = evaluate_stmt (stmt);
2936 if (val.lattice_val != CONSTANT
2937 || val.mask != 0)
2938 return SSA_PROP_VARYING;
2939
2940 /* Find which edge out of the conditional block will be taken and add it
2941 to the worklist. If no single edge can be determined statically,
2942 return SSA_PROP_VARYING to feed all the outgoing edges to the
2943 propagation engine. */
2944 *taken_edge_p = find_taken_edge (block, val.value);
2945 if (*taken_edge_p)
2946 return SSA_PROP_INTERESTING;
2947 else
2948 return SSA_PROP_VARYING;
2949}
2950
2951
2952/* Evaluate statement STMT. If the statement produces an output value and
2953 its evaluation changes the lattice value of its output, return
2954 SSA_PROP_INTERESTING and set *OUTPUT_P to the SSA_NAME holding the
2955 output value.
2956
2957 If STMT is a conditional branch and we can determine its truth
2958 value, set *TAKEN_EDGE_P accordingly. If STMT produces a varying
2959 value, return SSA_PROP_VARYING. */
2960
2961enum ssa_prop_result
2962ccp_propagate::visit_stmt (gimple *stmt, edge *taken_edge_p, tree *output_p)
2963{
2964 tree def;
2965 ssa_op_iter iter;
2966
2967 if (dump_file && (dump_flags & TDF_DETAILS))
2968 {
2969 fprintf (stream: dump_file, format: "\nVisiting statement:\n");
2970 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
2971 }
2972
2973 switch (gimple_code (g: stmt))
2974 {
2975 case GIMPLE_ASSIGN:
2976 /* If the statement is an assignment that produces a single
2977 output value, evaluate its RHS to see if the lattice value of
2978 its output has changed. */
2979 return visit_assignment (stmt, output_p);
2980
2981 case GIMPLE_CALL:
2982 /* A value-returning call also performs an assignment. */
2983 if (gimple_call_lhs (gs: stmt) != NULL_TREE)
2984 return visit_assignment (stmt, output_p);
2985 break;
2986
2987 case GIMPLE_COND:
2988 case GIMPLE_SWITCH:
2989 /* If STMT is a conditional branch, see if we can determine
2990 which branch will be taken. */
2991 /* FIXME. It appears that we should be able to optimize
2992 computed GOTOs here as well. */
2993 return visit_cond_stmt (stmt, taken_edge_p);
2994
2995 default:
2996 break;
2997 }
2998
2999 /* Any other kind of statement is not interesting for constant
3000 propagation and, therefore, not worth simulating. */
3001 if (dump_file && (dump_flags & TDF_DETAILS))
3002 fprintf (stream: dump_file, format: "No interesting values produced. Marked VARYING.\n");
3003
3004 /* Definitions made by statements other than assignments to
3005 SSA_NAMEs represent unknown modifications to their outputs.
3006 Mark them VARYING. */
3007 FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_ALL_DEFS)
3008 set_value_varying (def);
3009
3010 return SSA_PROP_VARYING;
3011}
3012
3013
3014/* Main entry point for SSA Conditional Constant Propagation. If NONZERO_P,
3015 record nonzero bits. */
3016
3017static unsigned int
3018do_ssa_ccp (bool nonzero_p)
3019{
3020 unsigned int todo = 0;
3021 calculate_dominance_info (CDI_DOMINATORS);
3022
3023 ccp_initialize ();
3024 class ccp_propagate ccp_propagate;
3025 ccp_propagate.ssa_propagate ();
3026 if (ccp_finalize (nonzero_p: nonzero_p || flag_ipa_bit_cp))
3027 {
3028 todo = TODO_cleanup_cfg;
3029
3030 /* ccp_finalize does not preserve loop-closed ssa. */
3031 loops_state_clear (flags: LOOP_CLOSED_SSA);
3032 }
3033
3034 free_dominance_info (CDI_DOMINATORS);
3035 return todo;
3036}
3037
3038
3039namespace {
3040
3041const pass_data pass_data_ccp =
3042{
3043 .type: GIMPLE_PASS, /* type */
3044 .name: "ccp", /* name */
3045 .optinfo_flags: OPTGROUP_NONE, /* optinfo_flags */
3046 .tv_id: TV_TREE_CCP, /* tv_id */
3047 .properties_required: ( PROP_cfg | PROP_ssa ), /* properties_required */
3048 .properties_provided: 0, /* properties_provided */
3049 .properties_destroyed: 0, /* properties_destroyed */
3050 .todo_flags_start: 0, /* todo_flags_start */
3051 TODO_update_address_taken, /* todo_flags_finish */
3052};
3053
3054class pass_ccp : public gimple_opt_pass
3055{
3056public:
3057 pass_ccp (gcc::context *ctxt)
3058 : gimple_opt_pass (pass_data_ccp, ctxt), nonzero_p (false)
3059 {}
3060
3061 /* opt_pass methods: */
3062 opt_pass * clone () final override { return new pass_ccp (m_ctxt); }
3063 void set_pass_param (unsigned int n, bool param) final override
3064 {
3065 gcc_assert (n == 0);
3066 nonzero_p = param;
3067 }
3068 bool gate (function *) final override { return flag_tree_ccp != 0; }
3069 unsigned int execute (function *) final override
3070 {
3071 return do_ssa_ccp (nonzero_p);
3072 }
3073
3074 private:
3075 /* Determines whether the pass instance records nonzero bits. */
3076 bool nonzero_p;
3077}; // class pass_ccp
3078
3079} // anon namespace
3080
3081gimple_opt_pass *
3082make_pass_ccp (gcc::context *ctxt)
3083{
3084 return new pass_ccp (ctxt);
3085}
3086
3087
3088
3089/* Try to optimize out __builtin_stack_restore. Optimize it out
3090 if there is another __builtin_stack_restore in the same basic
3091 block and no calls or ASM_EXPRs are in between, or if this block's
3092 only outgoing edge is to EXIT_BLOCK and there are no calls or
3093 ASM_EXPRs after this __builtin_stack_restore. */
3094
3095static tree
3096optimize_stack_restore (gimple_stmt_iterator i)
3097{
3098 tree callee;
3099 gimple *stmt;
3100
3101 basic_block bb = gsi_bb (i);
3102 gimple *call = gsi_stmt (i);
3103
3104 if (gimple_code (g: call) != GIMPLE_CALL
3105 || gimple_call_num_args (gs: call) != 1
3106 || TREE_CODE (gimple_call_arg (call, 0)) != SSA_NAME
3107 || !POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (call, 0))))
3108 return NULL_TREE;
3109
3110 for (gsi_next (i: &i); !gsi_end_p (i); gsi_next (i: &i))
3111 {
3112 stmt = gsi_stmt (i);
3113 if (gimple_code (g: stmt) == GIMPLE_ASM)
3114 return NULL_TREE;
3115 if (gimple_code (g: stmt) != GIMPLE_CALL)
3116 continue;
3117
3118 callee = gimple_call_fndecl (gs: stmt);
3119 if (!callee
3120 || !fndecl_built_in_p (node: callee, klass: BUILT_IN_NORMAL)
3121 /* All regular builtins are ok, just obviously not alloca. */
3122 || ALLOCA_FUNCTION_CODE_P (DECL_FUNCTION_CODE (callee))
3123 /* Do not remove stack updates before strub leave. */
3124 || fndecl_built_in_p (node: callee, name1: BUILT_IN___STRUB_LEAVE))
3125 return NULL_TREE;
3126
3127 if (fndecl_built_in_p (node: callee, name1: BUILT_IN_STACK_RESTORE))
3128 goto second_stack_restore;
3129 }
3130
3131 if (!gsi_end_p (i))
3132 return NULL_TREE;
3133
3134 /* Allow zero successors, or a single successor that is the exit block. */
3135 switch (EDGE_COUNT (bb->succs))
3136 {
3137 case 0:
3138 break;
3139 case 1:
3140 if (single_succ_edge (bb)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
3141 return NULL_TREE;
3142 break;
3143 default:
3144 return NULL_TREE;
3145 }
3146 second_stack_restore:
3147
3148 /* If there's exactly one use, then zap the call to __builtin_stack_save.
3149 If there are multiple uses, then the last one should remove the call.
3150 In any case, whether the call to __builtin_stack_save can be removed
3151 or not is irrelevant to removing the call to __builtin_stack_restore. */
3152 if (has_single_use (var: gimple_call_arg (gs: call, index: 0)))
3153 {
3154 gimple *stack_save = SSA_NAME_DEF_STMT (gimple_call_arg (call, 0));
3155 if (is_gimple_call (gs: stack_save))
3156 {
3157 callee = gimple_call_fndecl (gs: stack_save);
3158 if (callee && fndecl_built_in_p (node: callee, name1: BUILT_IN_STACK_SAVE))
3159 {
3160 gimple_stmt_iterator stack_save_gsi;
3161 tree rhs;
3162
3163 stack_save_gsi = gsi_for_stmt (stack_save);
3164 rhs = build_int_cst (TREE_TYPE (gimple_call_arg (call, 0)), 0);
3165 replace_call_with_value (&stack_save_gsi, rhs);
3166 }
3167 }
3168 }
3169
3170 /* No effect, so the statement will be deleted. */
3171 return integer_zero_node;
3172}
3173
3174/* If va_list type is a simple pointer and nothing special is needed,
3175 optimize __builtin_va_start (&ap, 0) into ap = __builtin_next_arg (0),
3176 optimize __builtin_va_end (&ap) out as a NOP, and __builtin_va_copy into a simple
3177 pointer assignment. */
3178
3179static tree
3180optimize_stdarg_builtin (gimple *call)
3181{
3182 tree callee, lhs, rhs, cfun_va_list;
3183 bool va_list_simple_ptr;
3184 location_t loc = gimple_location (g: call);
3185
3186 callee = gimple_call_fndecl (gs: call);
3187
3188 cfun_va_list = targetm.fn_abi_va_list (callee);
3189 va_list_simple_ptr = POINTER_TYPE_P (cfun_va_list)
3190 && (TREE_TYPE (cfun_va_list) == void_type_node
3191 || TREE_TYPE (cfun_va_list) == char_type_node);
3192
3193 switch (DECL_FUNCTION_CODE (decl: callee))
3194 {
3195 case BUILT_IN_VA_START:
3196 if (!va_list_simple_ptr
3197 || targetm.expand_builtin_va_start != NULL
3198 || !builtin_decl_explicit_p (fncode: BUILT_IN_NEXT_ARG))
3199 return NULL_TREE;
3200
3201 if (gimple_call_num_args (gs: call) != 2)
3202 return NULL_TREE;
3203
3204 lhs = gimple_call_arg (gs: call, index: 0);
3205 if (!POINTER_TYPE_P (TREE_TYPE (lhs))
3206 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
3207 != TYPE_MAIN_VARIANT (cfun_va_list))
3208 return NULL_TREE;
3209
3210 lhs = build_fold_indirect_ref_loc (loc, lhs);
3211 rhs = build_call_expr_loc (loc, builtin_decl_explicit (fncode: BUILT_IN_NEXT_ARG),
3212 1, integer_zero_node);
3213 rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
3214 return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
3215
3216 case BUILT_IN_VA_COPY:
3217 if (!va_list_simple_ptr)
3218 return NULL_TREE;
3219
3220 if (gimple_call_num_args (gs: call) != 2)
3221 return NULL_TREE;
3222
3223 lhs = gimple_call_arg (gs: call, index: 0);
3224 if (!POINTER_TYPE_P (TREE_TYPE (lhs))
3225 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (lhs)))
3226 != TYPE_MAIN_VARIANT (cfun_va_list))
3227 return NULL_TREE;
3228
3229 lhs = build_fold_indirect_ref_loc (loc, lhs);
3230 rhs = gimple_call_arg (gs: call, index: 1);
3231 if (TYPE_MAIN_VARIANT (TREE_TYPE (rhs))
3232 != TYPE_MAIN_VARIANT (cfun_va_list))
3233 return NULL_TREE;
3234
3235 rhs = fold_convert_loc (loc, TREE_TYPE (lhs), rhs);
3236 return build2 (MODIFY_EXPR, TREE_TYPE (lhs), lhs, rhs);
3237
3238 case BUILT_IN_VA_END:
3239 /* No effect, so the statement will be deleted. */
3240 return integer_zero_node;
3241
3242 default:
3243 gcc_unreachable ();
3244 }
3245}
3246
3247 /* Attempt to make the block containing the __builtin_unreachable at I
3248 unreachable by changing the incoming jumps. Return true if at least one jump was changed. */
3249
3250static bool
3251optimize_unreachable (gimple_stmt_iterator i)
3252{
3253 basic_block bb = gsi_bb (i);
3254 gimple_stmt_iterator gsi;
3255 gimple *stmt;
3256 edge_iterator ei;
3257 edge e;
3258 bool ret;
3259
3260 if (flag_sanitize & SANITIZE_UNREACHABLE)
3261 return false;
3262
3263 for (gsi = gsi_start_bb (bb); !gsi_end_p (i: gsi); gsi_next (i: &gsi))
3264 {
3265 stmt = gsi_stmt (i: gsi);
3266
3267 if (is_gimple_debug (gs: stmt))
3268 continue;
3269
3270 if (glabel *label_stmt = dyn_cast <glabel *> (p: stmt))
3271 {
3272 /* Verify we do not need to preserve the label. */
3273 if (FORCED_LABEL (gimple_label_label (label_stmt)))
3274 return false;
3275
3276 continue;
3277 }
3278
3279 /* Only handle the case that __builtin_unreachable is the first statement
3280 in the block. We rely on DCE to remove stmts without side-effects
3281 before __builtin_unreachable. */
3282 if (gsi_stmt (i: gsi) != gsi_stmt (i))
3283 return false;
3284 }
3285
3286 ret = false;
3287 FOR_EACH_EDGE (e, ei, bb->preds)
3288 {
3289 gsi = gsi_last_bb (bb: e->src);
3290 if (gsi_end_p (i: gsi))
3291 continue;
3292
3293 stmt = gsi_stmt (i: gsi);
3294 if (gcond *cond_stmt = dyn_cast <gcond *> (p: stmt))
3295 {
3296 if (e->flags & EDGE_TRUE_VALUE)
3297 gimple_cond_make_false (gs: cond_stmt);
3298 else if (e->flags & EDGE_FALSE_VALUE)
3299 gimple_cond_make_true (gs: cond_stmt);
3300 else
3301 gcc_unreachable ();
3302 update_stmt (s: cond_stmt);
3303 }
3304 else
3305 {
3306 /* Todo: handle other cases. Note that unreachable switch case
3307 statements have already been removed. */
3308 continue;
3309 }
3310
3311 ret = true;
3312 }
3313
3314 return ret;
3315}
3316
3317/* Convert
3318 _1 = __atomic_fetch_or_* (ptr_6, 1, _3);
3319 _7 = ~_1;
3320 _5 = (_Bool) _7;
3321 to
3322 _1 = __atomic_fetch_or_* (ptr_6, 1, _3);
3323 _8 = _1 & 1;
3324 _5 = _8 == 0;
3325 and convert
3326 _1 = __atomic_fetch_and_* (ptr_6, ~1, _3);
3327 _7 = ~_1;
3328 _4 = (_Bool) _7;
3329 to
3330 _1 = __atomic_fetch_and_* (ptr_6, ~1, _3);
3331 _8 = _1 & 1;
3332 _4 = (_Bool) _8;
3333
3334 USE_STMT is the gimple statement which uses the return value of
3335 __atomic_fetch_or_*. LHS is the return value of __atomic_fetch_or_*.
3336 MASK is the mask passed to __atomic_fetch_or_*.
3337 */
3338
3339static gimple *
3340convert_atomic_bit_not (enum internal_fn fn, gimple *use_stmt,
3341 tree lhs, tree mask)
3342{
3343 tree and_mask;
3344 if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
3345 {
3346 /* MASK must be ~1. */
3347 if (!operand_equal_p (build_int_cst (TREE_TYPE (lhs),
3348 ~HOST_WIDE_INT_1), mask, flags: 0))
3349 return nullptr;
3350 and_mask = build_int_cst (TREE_TYPE (lhs), 1);
3351 }
3352 else
3353 {
3354 /* MASK must be 1. */
3355 if (!operand_equal_p (build_int_cst (TREE_TYPE (lhs), 1), mask, flags: 0))
3356 return nullptr;
3357 and_mask = mask;
3358 }
3359
3360 tree use_lhs = gimple_assign_lhs (gs: use_stmt);
3361
3362 use_operand_p use_p;
3363 gimple *use_not_stmt;
3364
3365 if (!single_imm_use (var: use_lhs, use_p: &use_p, stmt: &use_not_stmt)
3366 || !is_gimple_assign (gs: use_not_stmt))
3367 return nullptr;
3368
3369 if (!CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (use_not_stmt)))
3370 return nullptr;
3371
3372 tree use_not_lhs = gimple_assign_lhs (gs: use_not_stmt);
3373 if (TREE_CODE (TREE_TYPE (use_not_lhs)) != BOOLEAN_TYPE)
3374 return nullptr;
3375
3376 gimple_stmt_iterator gsi;
3377 tree var = make_ssa_name (TREE_TYPE (lhs));
3378 /* use_stmt needs to be removed after use_not_stmt,
3379 so use_lhs can be released. */
3380 gimple *use_stmt_removal = use_stmt;
3381 use_stmt = gimple_build_assign (var, BIT_AND_EXPR, lhs, and_mask);
3382 gsi = gsi_for_stmt (use_not_stmt);
3383 gsi_insert_before (&gsi, use_stmt, GSI_NEW_STMT);
3384 lhs = gimple_assign_lhs (gs: use_not_stmt);
3385 gimple *g = gimple_build_assign (lhs, EQ_EXPR, var,
3386 build_zero_cst (TREE_TYPE (mask)));
3387 gsi_insert_after (&gsi, g, GSI_NEW_STMT);
3388 gsi = gsi_for_stmt (use_not_stmt);
3389 gsi_remove (&gsi, true);
3390 gsi = gsi_for_stmt (use_stmt_removal);
3391 gsi_remove (&gsi, true);
3392 return use_stmt;
3393}
3394
3395 /* match.pd function used to match an atomic_bit_test_and pattern
3396 that contains a nop_convert:
3397 _1 = __atomic_fetch_or_4 (&v, 1, 0);
3398 _2 = (int) _1;
3399 _5 = _2 & 1;
3400 */
3401extern bool gimple_nop_atomic_bit_test_and_p (tree, tree *,
3402 tree (*) (tree));
3403extern bool gimple_nop_convert (tree, tree*, tree (*) (tree));
3404
3405/* Optimize
3406 mask_2 = 1 << cnt_1;
3407 _4 = __atomic_fetch_or_* (ptr_6, mask_2, _3);
3408 _5 = _4 & mask_2;
3409 to
3410 _4 = .ATOMIC_BIT_TEST_AND_SET (ptr_6, cnt_1, 0, _3);
3411 _5 = _4;
3412 If _5 is only used in _5 != 0 or _5 == 0 comparisons, 1
3413 is passed instead of 0, and the builtin just returns a zero
3414 or 1 value instead of the actual bit.
3415 Similarly for __sync_fetch_and_or_* (without the ", _3" part
3416 in there), and/or if mask_2 is a power of 2 constant.
3417 Similarly for xor instead of or, use ATOMIC_BIT_TEST_AND_COMPLEMENT
3418 in that case. And similarly for and instead of or, except that
3419 the second argument to the builtin needs to be one's complement
3420 of the mask instead of mask. */
3421
3422static bool
3423optimize_atomic_bit_test_and (gimple_stmt_iterator *gsip,
3424 enum internal_fn fn, bool has_model_arg,
3425 bool after)
3426{
3427 gimple *call = gsi_stmt (i: *gsip);
3428 tree lhs = gimple_call_lhs (gs: call);
3429 use_operand_p use_p;
3430 gimple *use_stmt;
3431 tree mask;
3432 optab optab;
3433
3434 if (!flag_inline_atomics
3435 || optimize_debug
3436 || !gimple_call_builtin_p (call, BUILT_IN_NORMAL)
3437 || !lhs
3438 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
3439 || !single_imm_use (var: lhs, use_p: &use_p, stmt: &use_stmt)
3440 || !is_gimple_assign (gs: use_stmt)
3441 || !gimple_vdef (g: call))
3442 return false;
3443
3444 switch (fn)
3445 {
3446 case IFN_ATOMIC_BIT_TEST_AND_SET:
3447 optab = atomic_bit_test_and_set_optab;
3448 break;
3449 case IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT:
3450 optab = atomic_bit_test_and_complement_optab;
3451 break;
3452 case IFN_ATOMIC_BIT_TEST_AND_RESET:
3453 optab = atomic_bit_test_and_reset_optab;
3454 break;
3455 default:
3456 return false;
3457 }
3458
3459 tree bit = nullptr;
3460
3461 mask = gimple_call_arg (gs: call, index: 1);
3462 tree_code rhs_code = gimple_assign_rhs_code (gs: use_stmt);
3463 if (rhs_code != BIT_AND_EXPR)
3464 {
3465 if (rhs_code != NOP_EXPR && rhs_code != BIT_NOT_EXPR)
3466 return false;
3467
3468 tree use_lhs = gimple_assign_lhs (gs: use_stmt);
3469 if (TREE_CODE (use_lhs) == SSA_NAME
3470 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs))
3471 return false;
3472
3473 tree use_rhs = gimple_assign_rhs1 (gs: use_stmt);
3474 if (lhs != use_rhs)
3475 return false;
3476
3477 if (optab_handler (op: optab, TYPE_MODE (TREE_TYPE (lhs)))
3478 == CODE_FOR_nothing)
3479 return false;
3480
3481 gimple *g;
3482 gimple_stmt_iterator gsi;
3483 tree var;
3484 int ibit = -1;
3485
3486 if (rhs_code == BIT_NOT_EXPR)
3487 {
3488 g = convert_atomic_bit_not (fn, use_stmt, lhs, mask);
3489 if (!g)
3490 return false;
3491 use_stmt = g;
3492 ibit = 0;
3493 }
3494 else if (TREE_CODE (TREE_TYPE (use_lhs)) == BOOLEAN_TYPE)
3495 {
3496 tree and_mask;
3497 if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
3498 {
3499 /* MASK must be ~1. */
3500 if (!operand_equal_p (build_int_cst (TREE_TYPE (lhs),
3501 ~HOST_WIDE_INT_1),
3502 mask, flags: 0))
3503 return false;
3504
3505 /* Convert
3506 _1 = __atomic_fetch_and_* (ptr_6, ~1, _3);
3507 _4 = (_Bool) _1;
3508 to
3509 _1 = __atomic_fetch_and_* (ptr_6, ~1, _3);
3510 _5 = _1 & 1;
3511 _4 = (_Bool) _5;
3512 */
3513 and_mask = build_int_cst (TREE_TYPE (lhs), 1);
3514 }
3515 else
3516 {
3517 and_mask = build_int_cst (TREE_TYPE (lhs), 1);
3518 if (!operand_equal_p (and_mask, mask, flags: 0))
3519 return false;
3520
3521 /* Convert
3522 _1 = __atomic_fetch_or_* (ptr_6, 1, _3);
3523 _4 = (_Bool) _1;
3524 to
3525 _1 = __atomic_fetch_or_* (ptr_6, 1, _3);
3526 _5 = _1 & 1;
3527 _4 = (_Bool) _5;
3528 */
3529 }
3530 var = make_ssa_name (TREE_TYPE (use_rhs));
3531 replace_uses_by (use_rhs, var);
3532 g = gimple_build_assign (var, BIT_AND_EXPR, use_rhs,
3533 and_mask);
3534 gsi = gsi_for_stmt (use_stmt);
3535 gsi_insert_before (&gsi, g, GSI_NEW_STMT);
3536 use_stmt = g;
3537 ibit = 0;
3538 }
3539 else if (TYPE_PRECISION (TREE_TYPE (use_lhs))
3540 <= TYPE_PRECISION (TREE_TYPE (use_rhs)))
3541 {
3542 gimple *use_nop_stmt;
3543 if (!single_imm_use (var: use_lhs, use_p: &use_p, stmt: &use_nop_stmt)
3544 || (!is_gimple_assign (gs: use_nop_stmt)
3545 && gimple_code (g: use_nop_stmt) != GIMPLE_COND))
3546 return false;
3547 /* Handle both
3548 _4 = _5 < 0;
3549 and
3550 if (_5 < 0)
3551 */
3552 tree use_nop_lhs = nullptr;
3553 rhs_code = ERROR_MARK;
3554 if (is_gimple_assign (gs: use_nop_stmt))
3555 {
3556 use_nop_lhs = gimple_assign_lhs (gs: use_nop_stmt);
3557 rhs_code = gimple_assign_rhs_code (gs: use_nop_stmt);
3558 }
3559 if (!use_nop_lhs || rhs_code != BIT_AND_EXPR)
3560 {
3561 /* Also handle
3562 if (_5 < 0)
3563 */
3564 if (use_nop_lhs
3565 && TREE_CODE (use_nop_lhs) == SSA_NAME
3566 && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_nop_lhs))
3567 return false;
3568 if (use_nop_lhs && rhs_code == BIT_NOT_EXPR)
3569 {
3570 /* Handle
3571 _7 = ~_2;
3572 */
3573 g = convert_atomic_bit_not (fn, use_stmt: use_nop_stmt, lhs,
3574 mask);
3575 if (!g)
3576 return false;
3577 /* Convert
3578 _1 = __atomic_fetch_or_4 (ptr_6, 1, _3);
3579 _2 = (int) _1;
3580 _7 = ~_2;
3581 _5 = (_Bool) _7;
3582 to
3583 _1 = __atomic_fetch_or_4 (ptr_6, ~1, _3);
3584 _8 = _1 & 1;
3585 _5 = _8 == 0;
3586 and convert
3587 _1 = __atomic_fetch_and_4 (ptr_6, ~1, _3);
3588 _2 = (int) _1;
3589 _7 = ~_2;
3590 _5 = (_Bool) _7;
3591 to
3592 _1 = __atomic_fetch_and_4 (ptr_6, 1, _3);
3593 _8 = _1 & 1;
3594 _5 = _8 == 0;
3595 */
3596 gsi = gsi_for_stmt (use_stmt);
3597 gsi_remove (&gsi, true);
3598 use_stmt = g;
3599 ibit = 0;
3600 }
3601 else
3602 {
3603 tree cmp_rhs1, cmp_rhs2;
3604 if (use_nop_lhs)
3605 {
3606 /* Handle
3607 _4 = _5 < 0;
3608 */
3609 if (TREE_CODE (TREE_TYPE (use_nop_lhs))
3610 != BOOLEAN_TYPE)
3611 return false;
3612 cmp_rhs1 = gimple_assign_rhs1 (gs: use_nop_stmt);
3613 cmp_rhs2 = gimple_assign_rhs2 (gs: use_nop_stmt);
3614 }
3615 else
3616 {
3617 /* Handle
3618 if (_5 < 0)
3619 */
3620 rhs_code = gimple_cond_code (gs: use_nop_stmt);
3621 cmp_rhs1 = gimple_cond_lhs (gs: use_nop_stmt);
3622 cmp_rhs2 = gimple_cond_rhs (gs: use_nop_stmt);
3623 }
3624 if (rhs_code != GE_EXPR && rhs_code != LT_EXPR)
3625 return false;
3626 if (use_lhs != cmp_rhs1)
3627 return false;
3628 if (!integer_zerop (cmp_rhs2))
3629 return false;
3630
3631 tree and_mask;
3632
3633 unsigned HOST_WIDE_INT bytes
3634 = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (use_rhs)));
3635 ibit = bytes * BITS_PER_UNIT - 1;
3636 unsigned HOST_WIDE_INT highest
3637 = HOST_WIDE_INT_1U << ibit;
3638
3639 if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
3640 {
3641 /* Get the signed maximum of the USE_RHS type. */
3642 and_mask = build_int_cst (TREE_TYPE (use_rhs),
3643 highest - 1);
3644 if (!operand_equal_p (and_mask, mask, flags: 0))
3645 return false;
3646
3647 /* Convert
3648 _1 = __atomic_fetch_and_4 (ptr_6, 0x7fffffff, _3);
3649 _5 = (signed int) _1;
3650 _4 = _5 < 0 or _5 >= 0;
3651 to
3652 _1 = __atomic_fetch_and_4 (ptr_6, 0x7fffffff, _3);
3653 _6 = _1 & 0x80000000;
3654 _4 = _6 != 0 or _6 == 0;
3655 and convert
3656 _1 = __atomic_fetch_and_4 (ptr_6, 0x7fffffff, _3);
3657 _5 = (signed int) _1;
3658 if (_5 < 0 or _5 >= 0)
3659 to
3660 _1 = __atomic_fetch_and_4 (ptr_6, 0x7fffffff, _3);
3661 _6 = _1 & 0x80000000;
3662 if (_6 != 0 or _6 == 0)
3663 */
3664 and_mask = build_int_cst (TREE_TYPE (use_rhs),
3665 highest);
3666 }
3667 else
3668 {
3669 /* Get the signed minimum of the USE_RHS type. */
3670 and_mask = build_int_cst (TREE_TYPE (use_rhs),
3671 highest);
3672 if (!operand_equal_p (and_mask, mask, flags: 0))
3673 return false;
3674
3675 /* Convert
3676 _1 = __atomic_fetch_or_4 (ptr_6, 0x80000000, _3);
3677 _5 = (signed int) _1;
3678 _4 = _5 < 0 or _5 >= 0;
3679 to
3680 _1 = __atomic_fetch_or_4 (ptr_6, 0x80000000, _3);
3681 _6 = _1 & 0x80000000;
3682 _4 = _6 != 0 or _6 == 0;
3683 and convert
3684 _1 = __atomic_fetch_or_4 (ptr_6, 0x80000000, _3);
3685 _5 = (signed int) _1;
3686 if (_5 < 0 or _5 >= 0)
3687 to
3688 _1 = __atomic_fetch_or_4 (ptr_6, 0x80000000, _3);
3689 _6 = _1 & 0x80000000;
3690 if (_6 != 0 or _6 == 0)
3691 */
3692 }
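	  /* Illustrative sources for the two cases above (assumptions, not
	     taken from this file): sign-bit tests such as
	       if ((int) __atomic_fetch_or (p, 0x80000000u, m) < 0)
	     or
	       if ((int) __atomic_fetch_and (p, 0x7fffffffu, m) >= 0)
	     where the tested bit is exactly the bit being set or cleared,
	     so the comparison can be rewritten below as a test of that
	     single bit in the fetched value.  */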
3693 var = make_ssa_name (TREE_TYPE (use_rhs));
3694 gimple* use_stmt_removal = use_stmt;
3695 g = gimple_build_assign (var, BIT_AND_EXPR, use_rhs,
3696 and_mask);
3697 gsi = gsi_for_stmt (use_nop_stmt);
3698 gsi_insert_before (&gsi, g, GSI_NEW_STMT);
3699 use_stmt = g;
3700 rhs_code = rhs_code == GE_EXPR ? EQ_EXPR : NE_EXPR;
3701 tree const_zero = build_zero_cst (TREE_TYPE (use_rhs));
3702 if (use_nop_lhs)
3703 g = gimple_build_assign (use_nop_lhs, rhs_code,
3704 var, const_zero);
3705 else
3706 g = gimple_build_cond (rhs_code, var, const_zero,
3707 nullptr, nullptr);
3708 gsi_insert_after (&gsi, g, GSI_NEW_STMT);
3709 gsi = gsi_for_stmt (use_nop_stmt);
3710 gsi_remove (&gsi, true);
3711 gsi = gsi_for_stmt (use_stmt_removal);
3712 gsi_remove (&gsi, true);
3713 }
3714 }
3715 else
3716 {
3717 tree match_op[3];
3718 gimple *g;
3719 if (!gimple_nop_atomic_bit_test_and_p (use_nop_lhs,
3720 &match_op[0], NULL)
3721 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (match_op[2])
3722 || !single_imm_use (var: match_op[2], use_p: &use_p, stmt: &g)
3723 || !is_gimple_assign (gs: g))
3724 return false;
3725 mask = match_op[0];
3726 if (TREE_CODE (match_op[1]) == INTEGER_CST)
3727 {
3728 ibit = tree_log2 (match_op[1]);
3729 gcc_assert (ibit >= 0);
3730 }
3731 else
3732 {
3733 g = SSA_NAME_DEF_STMT (match_op[1]);
3734 gcc_assert (is_gimple_assign (g));
3735 bit = gimple_assign_rhs2 (gs: g);
3736 }
3737 /* Convert
3738 _1 = __atomic_fetch_or_4 (ptr_6, mask, _3);
3739 _2 = (int) _1;
3740 _5 = _2 & mask;
3741 to
3742 _1 = __atomic_fetch_or_4 (ptr_6, mask, _3);
3743 _6 = _1 & mask;
3744 _5 = (int) _6;
3745 and convert
3746 _1 = ~mask_7;
3747 _2 = (unsigned int) _1;
3748 _3 = __atomic_fetch_and_4 (ptr_6, _2, 0);
3749 _4 = (int) _3;
3750 _5 = _4 & mask_7;
3751 to
3752 _1 = __atomic_fetch_and_* (ptr_6, ~mask_7, _3);
3753 _12 = _3 & mask_7;
3754 _5 = (int) _12;
3755
3756 and convert
3757 _1 = __atomic_fetch_and_4 (ptr_6, ~mask, _3);
3758 _2 = (short int) _1;
3759 _5 = _2 & mask;
3760 to
3761 _1 = __atomic_fetch_and_4 (ptr_6, ~mask, _3);
3762 _8 = _1 & mask;
3763 _5 = (short int) _8;
3764 */
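	      /* The mask is converted to the type of the atomic call's
		 result, the AND is performed in that type, the now dead
		 intermediate cast USE_STMT is removed, and USE_NOP_STMT is
		 turned into a conversion of the new AND result.  */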
3765 gimple_seq stmts = NULL;
3766 match_op[1] = gimple_convert (seq: &stmts,
3767 TREE_TYPE (use_rhs),
3768 op: match_op[1]);
3769 var = gimple_build (seq: &stmts, code: BIT_AND_EXPR,
3770 TREE_TYPE (use_rhs), ops: use_rhs, ops: match_op[1]);
3771 gsi = gsi_for_stmt (use_stmt);
3772 gsi_remove (&gsi, true);
3773 release_defs (use_stmt);
3774 use_stmt = gimple_seq_last_stmt (s: stmts);
3775 gsi = gsi_for_stmt (use_nop_stmt);
3776 gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);
3777 gimple_assign_set_rhs_with_ops (gsi: &gsi, code: CONVERT_EXPR, op1: var);
3778 update_stmt (s: use_nop_stmt);
3779 }
3780 }
3781 else
3782 return false;
3783
3784 if (!bit)
3785 {
3786 if (ibit < 0)
3787 gcc_unreachable ();
3788 bit = build_int_cst (TREE_TYPE (lhs), ibit);
3789 }
3790 }
3791 else if (optab_handler (op: optab, TYPE_MODE (TREE_TYPE (lhs)))
3792 == CODE_FOR_nothing)
3793 return false;
3794
3795 tree use_lhs = gimple_assign_lhs (gs: use_stmt);
3796 if (!use_lhs)
3797 return false;
3798
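  /* If the bit position is not known yet, derive it from MASK: an
     INTEGER_CST mask must be a power of two (for
     IFN_ATOMIC_BIT_TEST_AND_RESET it is complemented first), while an
     SSA_NAME mask must be defined as 1 << BIT, possibly behind a nop
     conversion and, for the reset case, a BIT_NOT_EXPR.  The operand that
     USE_STMT applies to LHS must be that same MASK, looking through nop
     conversions.  */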
3799 if (!bit)
3800 {
3801 if (TREE_CODE (mask) == INTEGER_CST)
3802 {
3803 if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
3804 mask = const_unop (BIT_NOT_EXPR, TREE_TYPE (mask), mask);
3805 mask = fold_convert (TREE_TYPE (lhs), mask);
3806 int ibit = tree_log2 (mask);
3807 if (ibit < 0)
3808 return false;
3809 bit = build_int_cst (TREE_TYPE (lhs), ibit);
3810 }
3811 else if (TREE_CODE (mask) == SSA_NAME)
3812 {
3813 gimple *g = SSA_NAME_DEF_STMT (mask);
3814 tree match_op;
3815 if (gimple_nop_convert (mask, &match_op, NULL))
3816 {
3817 mask = match_op;
3818 if (TREE_CODE (mask) != SSA_NAME)
3819 return false;
3820 g = SSA_NAME_DEF_STMT (mask);
3821 }
3822 if (!is_gimple_assign (gs: g))
3823 return false;
3824
3825 if (fn == IFN_ATOMIC_BIT_TEST_AND_RESET)
3826 {
3827 if (gimple_assign_rhs_code (gs: g) != BIT_NOT_EXPR)
3828 return false;
3829 mask = gimple_assign_rhs1 (gs: g);
3830 if (TREE_CODE (mask) != SSA_NAME)
3831 return false;
3832 g = SSA_NAME_DEF_STMT (mask);
3833 }
3834
3835 if (!is_gimple_assign (gs: g)
3836 || gimple_assign_rhs_code (gs: g) != LSHIFT_EXPR
3837 || !integer_onep (gimple_assign_rhs1 (gs: g)))
3838 return false;
3839 bit = gimple_assign_rhs2 (gs: g);
3840 }
3841 else
3842 return false;
3843
3844 tree cmp_mask;
3845 if (gimple_assign_rhs1 (gs: use_stmt) == lhs)
3846 cmp_mask = gimple_assign_rhs2 (gs: use_stmt);
3847 else
3848 cmp_mask = gimple_assign_rhs1 (gs: use_stmt);
3849
3850 tree match_op;
3851 if (gimple_nop_convert (cmp_mask, &match_op, NULL))
3852 cmp_mask = match_op;
3853
3854 if (!operand_equal_p (cmp_mask, mask, flags: 0))
3855 return false;
3856 }
3857
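  /* Decide whether the users only care about the tested bit: USE_BOOL
     stays true only if every non-debug immediate use of USE_LHS is an
     EQ/NE comparison of USE_LHS against zero (possibly inside a COND_EXPR
     condition or a GIMPLE_COND).  In that case the internal function is
     asked, via the FLAG argument built below, to return the tested bit as
     0/1 rather than the masked old value.  */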
3858 bool use_bool = true;
3859 bool has_debug_uses = false;
3860 imm_use_iterator iter;
3861 gimple *g;
3862
3863 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs))
3864 use_bool = false;
3865 FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)
3866 {
3867 enum tree_code code = ERROR_MARK;
3868 tree op0 = NULL_TREE, op1 = NULL_TREE;
3869 if (is_gimple_debug (gs: g))
3870 {
3871 has_debug_uses = true;
3872 continue;
3873 }
3874 else if (is_gimple_assign (gs: g))
3875 switch (gimple_assign_rhs_code (gs: g))
3876 {
3877 case COND_EXPR:
3878 op1 = gimple_assign_rhs1 (gs: g);
3879 code = TREE_CODE (op1);
3880 if (TREE_CODE_CLASS (code) != tcc_comparison)
3881 break;
3882 op0 = TREE_OPERAND (op1, 0);
3883 op1 = TREE_OPERAND (op1, 1);
3884 break;
3885 case EQ_EXPR:
3886 case NE_EXPR:
3887 code = gimple_assign_rhs_code (gs: g);
3888 op0 = gimple_assign_rhs1 (gs: g);
3889 op1 = gimple_assign_rhs2 (gs: g);
3890 break;
3891 default:
3892 break;
3893 }
3894 else if (gimple_code (g) == GIMPLE_COND)
3895 {
3896 code = gimple_cond_code (gs: g);
3897 op0 = gimple_cond_lhs (gs: g);
3898 op1 = gimple_cond_rhs (gs: g);
3899 }
3900
3901 if ((code == EQ_EXPR || code == NE_EXPR)
3902 && op0 == use_lhs
3903 && integer_zerop (op1))
3904 {
3905 use_operand_p use_p;
3906 int n = 0;
3907 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3908 n++;
3909 if (n == 1)
3910 continue;
3911 }
3912
3913 use_bool = false;
3914 break;
3915 }
3916
3917 tree new_lhs = make_ssa_name (TREE_TYPE (lhs));
3918 tree flag = build_int_cst (TREE_TYPE (lhs), use_bool);
3919 if (has_model_arg)
3920 g = gimple_build_call_internal (fn, 5, gimple_call_arg (gs: call, index: 0),
3921 bit, flag, gimple_call_arg (gs: call, index: 2),
3922 gimple_call_fn (gs: call));
3923 else
3924 g = gimple_build_call_internal (fn, 4, gimple_call_arg (gs: call, index: 0),
3925 bit, flag, gimple_call_fn (gs: call));
3926 gimple_call_set_lhs (gs: g, lhs: new_lhs);
3927 gimple_set_location (g, location: gimple_location (g: call));
3928 gimple_move_vops (g, call);
3929 bool throws = stmt_can_throw_internal (cfun, call);
3930 gimple_call_set_nothrow (s: as_a <gcall *> (p: g),
3931 nothrow_p: gimple_call_nothrow_p (s: as_a <gcall *> (p: call)));
3932 gimple_stmt_iterator gsi = *gsip;
3933 gsi_insert_after (&gsi, g, GSI_NEW_STMT);
3934 edge e = NULL;
3935 if (throws)
3936 {
3937 maybe_clean_or_replace_eh_stmt (call, g);
3938 if (after || (use_bool && has_debug_uses))
3939 e = find_fallthru_edge (edges: gsi_bb (i: gsi)->succs);
3940 }
3941 if (after)
3942 {
3943 /* The internal function returns the value of the specified bit
3944 before the atomic operation. If we are interested in the value
3945 of the specified bit after the atomic operation (which only makes
3946 sense for xor, since otherwise the bit's content is known at
3947 compile time), we need to invert the bit. */
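	  /* For example (illustrative only): if the source tested
	       __atomic_xor_fetch (p, 1 << n, m) & (1 << n)
	     the bit after the xor is the complement of the bit before it,
	     so the internal function's result is flipped here, either by
	     XORing the boolean result with 1 or the masked result with
	     MASK.  */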
3948 tree mask_convert = mask;
3949 gimple_seq stmts = NULL;
3950 if (!use_bool)
3951 mask_convert = gimple_convert (seq: &stmts, TREE_TYPE (lhs), op: mask);
3952 new_lhs = gimple_build (seq: &stmts, code: BIT_XOR_EXPR, TREE_TYPE (lhs), ops: new_lhs,
3953 ops: use_bool ? build_int_cst (TREE_TYPE (lhs), 1)
3954 : mask_convert);
3955 if (throws)
3956 {
3957 gsi_insert_seq_on_edge_immediate (e, stmts);
3958 gsi = gsi_for_stmt (gimple_seq_last (s: stmts));
3959 }
3960 else
3961 gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT);
3962 }
3963 if (use_bool && has_debug_uses)
3964 {
3965 tree temp = NULL_TREE;
3966 if (!throws || after || single_pred_p (bb: e->dest))
3967 {
3968 temp = build_debug_expr_decl (TREE_TYPE (lhs));
3969 tree t = build2 (LSHIFT_EXPR, TREE_TYPE (lhs), new_lhs, bit);
3970 g = gimple_build_debug_bind (temp, t, g);
3971 if (throws && !after)
3972 {
3973 gsi = gsi_after_labels (bb: e->dest);
3974 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
3975 }
3976 else
3977 gsi_insert_after (&gsi, g, GSI_NEW_STMT);
3978 }
3979 FOR_EACH_IMM_USE_STMT (g, iter, use_lhs)
3980 if (is_gimple_debug (gs: g))
3981 {
3982 use_operand_p use_p;
3983 if (temp == NULL_TREE)
3984 gimple_debug_bind_reset_value (dbg: g);
3985 else
3986 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
3987 SET_USE (use_p, temp);
3988 update_stmt (s: g);
3989 }
3990 }
3991 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_lhs)
3992 = SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs);
3993 replace_uses_by (use_lhs, new_lhs);
3994 gsi = gsi_for_stmt (use_stmt);
3995 gsi_remove (&gsi, true);
3996 release_defs (use_stmt);
3997 gsi_remove (gsip, true);
3998 release_ssa_name (name: lhs);
3999 return true;
4000}
4001
4002/* Optimize
4003 _4 = __atomic_add_fetch_* (ptr_6, arg_2, _3);
4004 _5 = _4 == 0;
4005 to
4006 _4 = .ATOMIC_ADD_FETCH_CMP_0 (EQ_EXPR, ptr_6, arg_2, _3);
4007 _5 = _4;
4008 Similarly for __sync_add_and_fetch_* (without the ", _3" part
4009 in there). */
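/* Illustrative user-level example (an assumption about typical input, not
   part of the original sources): on a target providing the corresponding
   optab, code such as

     _Bool
     dec_and_test (int *counter)
     {
       return __atomic_sub_fetch (counter, 1, __ATOMIC_ACQ_REL) == 0;
     }

   matches this pattern, so the comparison can typically be derived from
   the flags set by the atomic instruction itself instead of re-testing
   the returned value.  */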
4010
4011static bool
4012optimize_atomic_op_fetch_cmp_0 (gimple_stmt_iterator *gsip,
4013 enum internal_fn fn, bool has_model_arg)
4014{
4015 gimple *call = gsi_stmt (i: *gsip);
4016 tree lhs = gimple_call_lhs (gs: call);
4017 use_operand_p use_p;
4018 gimple *use_stmt;
4019
4020 if (!flag_inline_atomics
4021 || optimize_debug
4022 || !gimple_call_builtin_p (call, BUILT_IN_NORMAL)
4023 || !lhs
4024 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)
4025 || !single_imm_use (var: lhs, use_p: &use_p, stmt: &use_stmt)
4026 || !gimple_vdef (g: call))
4027 return false;
4028
4029 optab optab;
4030 switch (fn)
4031 {
4032 case IFN_ATOMIC_ADD_FETCH_CMP_0:
4033 optab = atomic_add_fetch_cmp_0_optab;
4034 break;
4035 case IFN_ATOMIC_SUB_FETCH_CMP_0:
4036 optab = atomic_sub_fetch_cmp_0_optab;
4037 break;
4038 case IFN_ATOMIC_AND_FETCH_CMP_0:
4039 optab = atomic_and_fetch_cmp_0_optab;
4040 break;
4041 case IFN_ATOMIC_OR_FETCH_CMP_0:
4042 optab = atomic_or_fetch_cmp_0_optab;
4043 break;
4044 case IFN_ATOMIC_XOR_FETCH_CMP_0:
4045 optab = atomic_xor_fetch_cmp_0_optab;
4046 break;
4047 default:
4048 return false;
4049 }
4050
4051 if (optab_handler (op: optab, TYPE_MODE (TREE_TYPE (lhs)))
4052 == CODE_FOR_nothing)
4053 return false;
4054
4055 tree use_lhs = lhs;
4056 if (gimple_assign_cast_p (s: use_stmt))
4057 {
4058 use_lhs = gimple_assign_lhs (gs: use_stmt);
4059 if (!tree_nop_conversion_p (TREE_TYPE (use_lhs), TREE_TYPE (lhs))
4060 || (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
4061 && !POINTER_TYPE_P (TREE_TYPE (use_lhs)))
4062 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use_lhs)
4063 || !single_imm_use (var: use_lhs, use_p: &use_p, stmt: &use_stmt))
4064 return false;
4065 }
4066 enum tree_code code = ERROR_MARK;
4067 tree op0 = NULL_TREE, op1 = NULL_TREE;
4068 if (is_gimple_assign (gs: use_stmt))
4069 switch (gimple_assign_rhs_code (gs: use_stmt))
4070 {
4071 case COND_EXPR:
4072 op1 = gimple_assign_rhs1 (gs: use_stmt);
4073 code = TREE_CODE (op1);
4074 if (TREE_CODE_CLASS (code) == tcc_comparison)
4075 {
4076 op0 = TREE_OPERAND (op1, 0);
4077 op1 = TREE_OPERAND (op1, 1);
4078 }
4079 break;
4080 default:
4081 code = gimple_assign_rhs_code (gs: use_stmt);
4082 if (TREE_CODE_CLASS (code) == tcc_comparison)
4083 {
4084 op0 = gimple_assign_rhs1 (gs: use_stmt);
4085 op1 = gimple_assign_rhs2 (gs: use_stmt);
4086 }
4087 break;
4088 }
4089 else if (gimple_code (g: use_stmt) == GIMPLE_COND)
4090 {
4091 code = gimple_cond_code (gs: use_stmt);
4092 op0 = gimple_cond_lhs (gs: use_stmt);
4093 op1 = gimple_cond_rhs (gs: use_stmt);
4094 }
4095
4096 switch (code)
4097 {
4098 case LT_EXPR:
4099 case LE_EXPR:
4100 case GT_EXPR:
4101 case GE_EXPR:
4102 if (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
4103 || TREE_CODE (TREE_TYPE (use_lhs)) == BOOLEAN_TYPE
4104 || TYPE_UNSIGNED (TREE_TYPE (use_lhs)))
4105 return false;
4106 /* FALLTHRU */
4107 case EQ_EXPR:
4108 case NE_EXPR:
4109 if (op0 == use_lhs && integer_zerop (op1))
4110 break;
4111 return false;
4112 default:
4113 return false;
4114 }
4115
4116 int encoded;
4117 switch (code)
4118 {
4119 /* Use a special encoding of the operation. We also want to
4120 encode the mode in the first argument, and we cannot rely on
4121 either the EQ_EXPR etc. tree codes or the EQ etc. RTL codes
4121 fitting into QImode. */
4122 case EQ_EXPR: encoded = ATOMIC_OP_FETCH_CMP_0_EQ; break;
4123 case NE_EXPR: encoded = ATOMIC_OP_FETCH_CMP_0_NE; break;
4124 case LT_EXPR: encoded = ATOMIC_OP_FETCH_CMP_0_LT; break;
4125 case LE_EXPR: encoded = ATOMIC_OP_FETCH_CMP_0_LE; break;
4126 case GT_EXPR: encoded = ATOMIC_OP_FETCH_CMP_0_GT; break;
4127 case GE_EXPR: encoded = ATOMIC_OP_FETCH_CMP_0_GE; break;
4128 default: gcc_unreachable ();
4129 }
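  /* The replacement call (sketched here under an assumed dump syntax) then
     looks like
       _6 = .ATOMIC_ADD_FETCH_CMP_0 (ATOMIC_OP_FETCH_CMP_0_EQ, ptr_1, arg_2, _3, __atomic_add_fetch_4);
     i.e. the encoded comparison first, then the original arguments, and
     finally the address of the original built-in; NEW_LHS below holds the
     boolean result that replaces the old comparison.  */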
4130
4131 tree new_lhs = make_ssa_name (boolean_type_node);
4132 gimple *g;
4133 tree flag = build_int_cst (TREE_TYPE (lhs), encoded);
4134 if (has_model_arg)
4135 g = gimple_build_call_internal (fn, 5, flag,
4136 gimple_call_arg (gs: call, index: 0),
4137 gimple_call_arg (gs: call, index: 1),
4138 gimple_call_arg (gs: call, index: 2),
4139 gimple_call_fn (gs: call));
4140 else
4141 g = gimple_build_call_internal (fn, 4, flag,
4142 gimple_call_arg (gs: call, index: 0),
4143 gimple_call_arg (gs: call, index: 1),
4144 gimple_call_fn (gs: call));
4145 gimple_call_set_lhs (gs: g, lhs: new_lhs);
4146 gimple_set_location (g, location: gimple_location (g: call));
4147 gimple_move_vops (g, call);
4148 bool throws = stmt_can_throw_internal (cfun, call);
4149 gimple_call_set_nothrow (s: as_a <gcall *> (p: g),
4150 nothrow_p: gimple_call_nothrow_p (s: as_a <gcall *> (p: call)));
4151 gimple_stmt_iterator gsi = *gsip;
4152 gsi_insert_after (&gsi, g, GSI_SAME_STMT);
4153 if (throws)
4154 maybe_clean_or_replace_eh_stmt (call, g);
4155 if (is_gimple_assign (gs: use_stmt))
4156 switch (gimple_assign_rhs_code (gs: use_stmt))
4157 {
4158 case COND_EXPR:
4159 gimple_assign_set_rhs1 (gs: use_stmt, rhs: new_lhs);
4160 break;
4161 default:
4162 gsi = gsi_for_stmt (use_stmt);
4163 if (tree ulhs = gimple_assign_lhs (gs: use_stmt))
4164 if (useless_type_conversion_p (TREE_TYPE (ulhs),
4165 boolean_type_node))
4166 {
4167 gimple_assign_set_rhs_with_ops (gsi: &gsi, code: SSA_NAME, op1: new_lhs);
4168 break;
4169 }
4170 gimple_assign_set_rhs_with_ops (gsi: &gsi, code: NOP_EXPR, op1: new_lhs);
4171 break;
4172 }
4173 else if (gimple_code (g: use_stmt) == GIMPLE_COND)
4174 {
4175 gcond *use_cond = as_a <gcond *> (p: use_stmt);
4176 gimple_cond_set_code (gs: use_cond, code: NE_EXPR);
4177 gimple_cond_set_lhs (gs: use_cond, lhs: new_lhs);
4178 gimple_cond_set_rhs (gs: use_cond, boolean_false_node);
4179 }
4180
4181 update_stmt (s: use_stmt);
4182 if (use_lhs != lhs)
4183 {
4184 gsi = gsi_for_stmt (SSA_NAME_DEF_STMT (use_lhs));
4185 gsi_remove (&gsi, true);
4186 release_ssa_name (name: use_lhs);
4187 }
4188 gsi_remove (gsip, true);
4189 release_ssa_name (name: lhs);
4190 return true;
4191}
4192
4193/* A simple pass that attempts to fold all builtin functions. This pass
4194 is run after we've propagated as many constants as we can. */
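/* For example (illustrative): after constant propagation,

     if (__builtin_constant_p (n) && n == 0)
       fast_path ();

   where N never became a compile-time constant, the remaining
   __builtin_constant_p call is resolved to 0 here, allowing later cleanups
   to remove the dead branch.  */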
4195
4196namespace {
4197
4198const pass_data pass_data_fold_builtins =
4199{
4200 .type: GIMPLE_PASS, /* type */
4201 .name: "fab", /* name */
4202 .optinfo_flags: OPTGROUP_NONE, /* optinfo_flags */
4203 .tv_id: TV_NONE, /* tv_id */
4204 .properties_required: ( PROP_cfg | PROP_ssa ), /* properties_required */
4205 .properties_provided: 0, /* properties_provided */
4206 .properties_destroyed: 0, /* properties_destroyed */
4207 .todo_flags_start: 0, /* todo_flags_start */
4208 TODO_update_ssa, /* todo_flags_finish */
4209};
4210
4211class pass_fold_builtins : public gimple_opt_pass
4212{
4213public:
4214 pass_fold_builtins (gcc::context *ctxt)
4215 : gimple_opt_pass (pass_data_fold_builtins, ctxt)
4216 {}
4217
4218 /* opt_pass methods: */
4219 opt_pass * clone () final override { return new pass_fold_builtins (m_ctxt); }
4220 unsigned int execute (function *) final override;
4221
4222}; // class pass_fold_builtins
4223
4224unsigned int
4225pass_fold_builtins::execute (function *fun)
4226{
4227 bool cfg_changed = false;
4228 basic_block bb;
4229 unsigned int todoflags = 0;
4230
4231 FOR_EACH_BB_FN (bb, fun)
4232 {
4233 gimple_stmt_iterator i;
4234 for (i = gsi_start_bb (bb); !gsi_end_p (i); )
4235 {
4236 gimple *stmt, *old_stmt;
4237 tree callee;
4238 enum built_in_function fcode;
4239
4240 stmt = gsi_stmt (i);
4241
4242 if (gimple_code (g: stmt) != GIMPLE_CALL)
4243 {
4244 gsi_next (i: &i);
4245 continue;
4246 }
4247
4248 callee = gimple_call_fndecl (gs: stmt);
4249 if (!callee
4250 && gimple_call_internal_p (gs: stmt, fn: IFN_ASSUME))
4251 {
4252 gsi_remove (&i, true);
4253 continue;
4254 }
4255 if (!callee || !fndecl_built_in_p (node: callee, klass: BUILT_IN_NORMAL))
4256 {
4257 gsi_next (i: &i);
4258 continue;
4259 }
4260
4261 fcode = DECL_FUNCTION_CODE (decl: callee);
4262 if (fold_stmt (&i))
4263 ;
4264 else
4265 {
4266 tree result = NULL_TREE;
4267 switch (DECL_FUNCTION_CODE (decl: callee))
4268 {
4269 case BUILT_IN_CONSTANT_P:
4270 /* Resolve __builtin_constant_p. If it hasn't been
4271 folded to integer_one_node by now, it's fairly
4272 certain that the value simply isn't constant. */
4273 result = integer_zero_node;
4274 break;
4275
4276 case BUILT_IN_ASSUME_ALIGNED:
4277 /* Remove __builtin_assume_aligned. */
4278 result = gimple_call_arg (gs: stmt, index: 0);
4279 break;
4280
4281 case BUILT_IN_STACK_RESTORE:
4282 result = optimize_stack_restore (i);
4283 if (result)
4284 break;
4285 gsi_next (i: &i);
4286 continue;
4287
4288 case BUILT_IN_UNREACHABLE:
4289 if (optimize_unreachable (i))
4290 cfg_changed = true;
4291 break;
4292
4293 case BUILT_IN_ATOMIC_ADD_FETCH_1:
4294 case BUILT_IN_ATOMIC_ADD_FETCH_2:
4295 case BUILT_IN_ATOMIC_ADD_FETCH_4:
4296 case BUILT_IN_ATOMIC_ADD_FETCH_8:
4297 case BUILT_IN_ATOMIC_ADD_FETCH_16:
4298 optimize_atomic_op_fetch_cmp_0 (gsip: &i,
4299 fn: IFN_ATOMIC_ADD_FETCH_CMP_0,
4300 has_model_arg: true);
4301 break;
4302 case BUILT_IN_SYNC_ADD_AND_FETCH_1:
4303 case BUILT_IN_SYNC_ADD_AND_FETCH_2:
4304 case BUILT_IN_SYNC_ADD_AND_FETCH_4:
4305 case BUILT_IN_SYNC_ADD_AND_FETCH_8:
4306 case BUILT_IN_SYNC_ADD_AND_FETCH_16:
4307 optimize_atomic_op_fetch_cmp_0 (gsip: &i,
4308 fn: IFN_ATOMIC_ADD_FETCH_CMP_0,
4309 has_model_arg: false);
4310 break;
4311
4312 case BUILT_IN_ATOMIC_SUB_FETCH_1:
4313 case BUILT_IN_ATOMIC_SUB_FETCH_2:
4314 case BUILT_IN_ATOMIC_SUB_FETCH_4:
4315 case BUILT_IN_ATOMIC_SUB_FETCH_8:
4316 case BUILT_IN_ATOMIC_SUB_FETCH_16:
4317 optimize_atomic_op_fetch_cmp_0 (gsip: &i,
4318 fn: IFN_ATOMIC_SUB_FETCH_CMP_0,
4319 has_model_arg: true);
4320 break;
4321 case BUILT_IN_SYNC_SUB_AND_FETCH_1:
4322 case BUILT_IN_SYNC_SUB_AND_FETCH_2:
4323 case BUILT_IN_SYNC_SUB_AND_FETCH_4:
4324 case BUILT_IN_SYNC_SUB_AND_FETCH_8:
4325 case BUILT_IN_SYNC_SUB_AND_FETCH_16:
4326 optimize_atomic_op_fetch_cmp_0 (gsip: &i,
4327 fn: IFN_ATOMIC_SUB_FETCH_CMP_0,
4328 has_model_arg: false);
4329 break;
4330
4331 case BUILT_IN_ATOMIC_FETCH_OR_1:
4332 case BUILT_IN_ATOMIC_FETCH_OR_2:
4333 case BUILT_IN_ATOMIC_FETCH_OR_4:
4334 case BUILT_IN_ATOMIC_FETCH_OR_8:
4335 case BUILT_IN_ATOMIC_FETCH_OR_16:
4336 optimize_atomic_bit_test_and (gsip: &i,
4337 fn: IFN_ATOMIC_BIT_TEST_AND_SET,
4338 has_model_arg: true, after: false);
4339 break;
4340 case BUILT_IN_SYNC_FETCH_AND_OR_1:
4341 case BUILT_IN_SYNC_FETCH_AND_OR_2:
4342 case BUILT_IN_SYNC_FETCH_AND_OR_4:
4343 case BUILT_IN_SYNC_FETCH_AND_OR_8:
4344 case BUILT_IN_SYNC_FETCH_AND_OR_16:
4345 optimize_atomic_bit_test_and (gsip: &i,
4346 fn: IFN_ATOMIC_BIT_TEST_AND_SET,
4347 has_model_arg: false, after: false);
4348 break;
4349
4350 case BUILT_IN_ATOMIC_FETCH_XOR_1:
4351 case BUILT_IN_ATOMIC_FETCH_XOR_2:
4352 case BUILT_IN_ATOMIC_FETCH_XOR_4:
4353 case BUILT_IN_ATOMIC_FETCH_XOR_8:
4354 case BUILT_IN_ATOMIC_FETCH_XOR_16:
4355 optimize_atomic_bit_test_and
4356 (gsip: &i, fn: IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, has_model_arg: true, after: false);
4357 break;
4358 case BUILT_IN_SYNC_FETCH_AND_XOR_1:
4359 case BUILT_IN_SYNC_FETCH_AND_XOR_2:
4360 case BUILT_IN_SYNC_FETCH_AND_XOR_4:
4361 case BUILT_IN_SYNC_FETCH_AND_XOR_8:
4362 case BUILT_IN_SYNC_FETCH_AND_XOR_16:
4363 optimize_atomic_bit_test_and
4364 (gsip: &i, fn: IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, has_model_arg: false, after: false);
4365 break;
4366
4367 case BUILT_IN_ATOMIC_XOR_FETCH_1:
4368 case BUILT_IN_ATOMIC_XOR_FETCH_2:
4369 case BUILT_IN_ATOMIC_XOR_FETCH_4:
4370 case BUILT_IN_ATOMIC_XOR_FETCH_8:
4371 case BUILT_IN_ATOMIC_XOR_FETCH_16:
4372 if (optimize_atomic_bit_test_and
4373 (gsip: &i, fn: IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, has_model_arg: true, after: true))
4374 break;
4375 optimize_atomic_op_fetch_cmp_0 (gsip: &i,
4376 fn: IFN_ATOMIC_XOR_FETCH_CMP_0,
4377 has_model_arg: true);
4378 break;
4379 case BUILT_IN_SYNC_XOR_AND_FETCH_1:
4380 case BUILT_IN_SYNC_XOR_AND_FETCH_2:
4381 case BUILT_IN_SYNC_XOR_AND_FETCH_4:
4382 case BUILT_IN_SYNC_XOR_AND_FETCH_8:
4383 case BUILT_IN_SYNC_XOR_AND_FETCH_16:
4384 if (optimize_atomic_bit_test_and
4385 (gsip: &i, fn: IFN_ATOMIC_BIT_TEST_AND_COMPLEMENT, has_model_arg: false, after: true))
4386 break;
4387 optimize_atomic_op_fetch_cmp_0 (gsip: &i,
4388 fn: IFN_ATOMIC_XOR_FETCH_CMP_0,
4389 has_model_arg: false);
4390 break;
4391
4392 case BUILT_IN_ATOMIC_FETCH_AND_1:
4393 case BUILT_IN_ATOMIC_FETCH_AND_2:
4394 case BUILT_IN_ATOMIC_FETCH_AND_4:
4395 case BUILT_IN_ATOMIC_FETCH_AND_8:
4396 case BUILT_IN_ATOMIC_FETCH_AND_16:
4397 optimize_atomic_bit_test_and (gsip: &i,
4398 fn: IFN_ATOMIC_BIT_TEST_AND_RESET,
4399 has_model_arg: true, after: false);
4400 break;
4401 case BUILT_IN_SYNC_FETCH_AND_AND_1:
4402 case BUILT_IN_SYNC_FETCH_AND_AND_2:
4403 case BUILT_IN_SYNC_FETCH_AND_AND_4:
4404 case BUILT_IN_SYNC_FETCH_AND_AND_8:
4405 case BUILT_IN_SYNC_FETCH_AND_AND_16:
4406 optimize_atomic_bit_test_and (gsip: &i,
4407 fn: IFN_ATOMIC_BIT_TEST_AND_RESET,
4408 has_model_arg: false, after: false);
4409 break;
4410
4411 case BUILT_IN_ATOMIC_AND_FETCH_1:
4412 case BUILT_IN_ATOMIC_AND_FETCH_2:
4413 case BUILT_IN_ATOMIC_AND_FETCH_4:
4414 case BUILT_IN_ATOMIC_AND_FETCH_8:
4415 case BUILT_IN_ATOMIC_AND_FETCH_16:
4416 optimize_atomic_op_fetch_cmp_0 (gsip: &i,
4417 fn: IFN_ATOMIC_AND_FETCH_CMP_0,
4418 has_model_arg: true);
4419 break;
4420 case BUILT_IN_SYNC_AND_AND_FETCH_1:
4421 case BUILT_IN_SYNC_AND_AND_FETCH_2:
4422 case BUILT_IN_SYNC_AND_AND_FETCH_4:
4423 case BUILT_IN_SYNC_AND_AND_FETCH_8:
4424 case BUILT_IN_SYNC_AND_AND_FETCH_16:
4425 optimize_atomic_op_fetch_cmp_0 (gsip: &i,
4426 fn: IFN_ATOMIC_AND_FETCH_CMP_0,
4427 has_model_arg: false);
4428 break;
4429
4430 case BUILT_IN_ATOMIC_OR_FETCH_1:
4431 case BUILT_IN_ATOMIC_OR_FETCH_2:
4432 case BUILT_IN_ATOMIC_OR_FETCH_4:
4433 case BUILT_IN_ATOMIC_OR_FETCH_8:
4434 case BUILT_IN_ATOMIC_OR_FETCH_16:
4435 optimize_atomic_op_fetch_cmp_0 (gsip: &i,
4436 fn: IFN_ATOMIC_OR_FETCH_CMP_0,
4437 has_model_arg: true);
4438 break;
4439 case BUILT_IN_SYNC_OR_AND_FETCH_1:
4440 case BUILT_IN_SYNC_OR_AND_FETCH_2:
4441 case BUILT_IN_SYNC_OR_AND_FETCH_4:
4442 case BUILT_IN_SYNC_OR_AND_FETCH_8:
4443 case BUILT_IN_SYNC_OR_AND_FETCH_16:
4444 optimize_atomic_op_fetch_cmp_0 (gsip: &i,
4445 fn: IFN_ATOMIC_OR_FETCH_CMP_0,
4446 has_model_arg: false);
4447 break;
4448
4449 case BUILT_IN_VA_START:
4450 case BUILT_IN_VA_END:
4451 case BUILT_IN_VA_COPY:
4452 /* These shouldn't be folded before pass_stdarg. */
4453 result = optimize_stdarg_builtin (call: stmt);
4454 break;
4455
4456 default:;
4457 }
4458
4459 if (!result)
4460 {
4461 gsi_next (i: &i);
4462 continue;
4463 }
4464
4465 gimplify_and_update_call_from_tree (&i, result);
4466 }
4467
4468 todoflags |= TODO_update_address_taken;
4469
4470 if (dump_file && (dump_flags & TDF_DETAILS))
4471 {
4472 fprintf (stream: dump_file, format: "Simplified\n ");
4473 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
4474 }
4475
4476 old_stmt = stmt;
4477 stmt = gsi_stmt (i);
4478 update_stmt (s: stmt);
4479
4480 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt)
4481 && gimple_purge_dead_eh_edges (bb))
4482 cfg_changed = true;
4483
4484 if (dump_file && (dump_flags & TDF_DETAILS))
4485 {
4486 fprintf (stream: dump_file, format: "to\n ");
4487 print_gimple_stmt (dump_file, stmt, 0, dump_flags);
4488 fprintf (stream: dump_file, format: "\n");
4489 }
4490
4491 /* Retry the same statement if it changed into another
4492 builtin; there might be new opportunities now. */
4493 if (gimple_code (g: stmt) != GIMPLE_CALL)
4494 {
4495 gsi_next (i: &i);
4496 continue;
4497 }
4498 callee = gimple_call_fndecl (gs: stmt);
4499 if (!callee
4500 || !fndecl_built_in_p (node: callee, name1: fcode))
4501 gsi_next (i: &i);
4502 }
4503 }
4504
4505 /* Delete unreachable blocks. */
4506 if (cfg_changed)
4507 todoflags |= TODO_cleanup_cfg;
4508
4509 return todoflags;
4510}
4511
4512} // anon namespace
4513
4514gimple_opt_pass *
4515make_pass_fold_builtins (gcc::context *ctxt)
4516{
4517 return new pass_fold_builtins (ctxt);
4518}
4519
4520/* A simple pass that emits some warnings post IPA. */
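/* For example (illustrative): given

     extern void use (void *p) __attribute__ ((nonnull));
     static inline void wrap (void *p) { use (p); }
     void f (void) { wrap (0); }

   the null argument to USE only becomes visible once WRAP has been
   inlined, so with -Wnonnull it is diagnosed by this pass rather than by
   the front end.  */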
4521
4522namespace {
4523
4524const pass_data pass_data_post_ipa_warn =
4525{
4526 .type: GIMPLE_PASS, /* type */
4527 .name: "post_ipa_warn", /* name */
4528 .optinfo_flags: OPTGROUP_NONE, /* optinfo_flags */
4529 .tv_id: TV_NONE, /* tv_id */
4530 .properties_required: ( PROP_cfg | PROP_ssa ), /* properties_required */
4531 .properties_provided: 0, /* properties_provided */
4532 .properties_destroyed: 0, /* properties_destroyed */
4533 .todo_flags_start: 0, /* todo_flags_start */
4534 .todo_flags_finish: 0, /* todo_flags_finish */
4535};
4536
4537class pass_post_ipa_warn : public gimple_opt_pass
4538{
4539public:
4540 pass_post_ipa_warn (gcc::context *ctxt)
4541 : gimple_opt_pass (pass_data_post_ipa_warn, ctxt)
4542 {}
4543
4544 /* opt_pass methods: */
4545 opt_pass * clone () final override { return new pass_post_ipa_warn (m_ctxt); }
4546 bool gate (function *) final override { return warn_nonnull != 0; }
4547 unsigned int execute (function *) final override;
4548
4549}; // class pass_post_ipa_warn
4550
4551unsigned int
4552pass_post_ipa_warn::execute (function *fun)
4553{
4554 basic_block bb;
4555 gimple_ranger *ranger = NULL;
4556
4557 FOR_EACH_BB_FN (bb, fun)
4558 {
4559 gimple_stmt_iterator gsi;
4560 for (gsi = gsi_start_bb (bb); !gsi_end_p (i: gsi); gsi_next (i: &gsi))
4561 {
4562 gimple *stmt = gsi_stmt (i: gsi);
4563 if (!is_gimple_call (gs: stmt) || warning_suppressed_p (stmt, OPT_Wnonnull))
4564 continue;
4565
4566 tree fntype = gimple_call_fntype (gs: stmt);
4567 if (!fntype)
4568 continue;
4569 bitmap nonnullargs = get_nonnull_args (fntype);
4570
4571 tree fndecl = gimple_call_fndecl (gs: stmt);
4572 const bool closure = fndecl && DECL_LAMBDA_FUNCTION_P (fndecl);
4573
4574 for (unsigned i = nonnullargs ? 0 : ~0U;
4575 i < gimple_call_num_args (gs: stmt); i++)
4576 {
4577 tree arg = gimple_call_arg (gs: stmt, index: i);
4578 if (TREE_CODE (TREE_TYPE (arg)) != POINTER_TYPE)
4579 continue;
4580 if (!integer_zerop (arg))
4581 continue;
4582 if (i == 0 && closure)
4583 /* Avoid warning for the first argument to lambda functions. */
4584 continue;
4585 if (!bitmap_empty_p (map: nonnullargs)
4586 && !bitmap_bit_p (nonnullargs, i))
4587 continue;
4588
4589 /* In C++ non-static member functions argument 0 refers
4590 to the implicit this pointer. Use the same one-based
4591 numbering for ordinary arguments. */
4592 unsigned argno = TREE_CODE (fntype) == METHOD_TYPE ? i : i + 1;
4593 location_t loc = (EXPR_HAS_LOCATION (arg)
4594 ? EXPR_LOCATION (arg)
4595 : gimple_location (g: stmt));
4596 auto_diagnostic_group d;
4597 if (argno == 0)
4598 {
4599 if (warning_at (loc, OPT_Wnonnull,
4600 "%qs pointer is null", "this")
4601 && fndecl)
4602 inform (DECL_SOURCE_LOCATION (fndecl),
4603 "in a call to non-static member function %qD",
4604 fndecl);
4605 continue;
4606 }
4607
4608 if (!warning_at (loc, OPT_Wnonnull,
4609 "argument %u null where non-null "
4610 "expected", argno))
4611 continue;
4612
4613 tree fndecl = gimple_call_fndecl (gs: stmt);
4614 if (fndecl && DECL_IS_UNDECLARED_BUILTIN (fndecl))
4615 inform (loc, "in a call to built-in function %qD",
4616 fndecl);
4617 else if (fndecl)
4618 inform (DECL_SOURCE_LOCATION (fndecl),
4619 "in a call to function %qD declared %qs",
4620 fndecl, "nonnull");
4621 }
4622 BITMAP_FREE (nonnullargs);
4623
4624 for (tree attrs = TYPE_ATTRIBUTES (fntype);
4625 (attrs = lookup_attribute (attr_name: "nonnull_if_nonzero", list: attrs));
4626 attrs = TREE_CHAIN (attrs))
4627 {
4628 tree args = TREE_VALUE (attrs);
4629 unsigned int idx = TREE_INT_CST_LOW (TREE_VALUE (args)) - 1;
4630 unsigned int idx2
4631 = TREE_INT_CST_LOW (TREE_VALUE (TREE_CHAIN (args))) - 1;
4632 if (idx < gimple_call_num_args (gs: stmt)
4633 && idx2 < gimple_call_num_args (gs: stmt))
4634 {
4635 tree arg = gimple_call_arg (gs: stmt, index: idx);
4636 tree arg2 = gimple_call_arg (gs: stmt, index: idx2);
4637 if (TREE_CODE (TREE_TYPE (arg)) != POINTER_TYPE
4638 || !integer_zerop (arg)
4639 || !INTEGRAL_TYPE_P (TREE_TYPE (arg2))
4640 || integer_zerop (arg2)
4641 || ((TREE_CODE (fntype) == METHOD_TYPE || closure)
4642 && (idx == 0 || idx2 == 0)))
4643 continue;
4644 if (!integer_nonzerop (arg2)
4645 && !tree_expr_nonzero_p (arg2))
4646 {
4647 if (TREE_CODE (arg2) != SSA_NAME || optimize < 2)
4648 continue;
4649 if (!ranger)
4650 ranger = enable_ranger (cfun);
4651
4652 int_range_max vr;
4653 get_range_query (cfun)->range_of_expr (r&: vr, expr: arg2, stmt);
4654 if (range_includes_zero_p (vr))
4655 continue;
4656 }
4657 unsigned argno = idx + 1;
4658 unsigned argno2 = idx2 + 1;
4659 location_t loc = (EXPR_HAS_LOCATION (arg)
4660 ? EXPR_LOCATION (arg)
4661 : gimple_location (g: stmt));
4662 auto_diagnostic_group d;
4663
4664 if (!warning_at (loc, OPT_Wnonnull,
4665 "argument %u null where non-null "
4666 "expected because argument %u is "
4667 "nonzero", argno, argno2))
4668 continue;
4669
4670 tree fndecl = gimple_call_fndecl (gs: stmt);
4671 if (fndecl && DECL_IS_UNDECLARED_BUILTIN (fndecl))
4672 inform (loc, "in a call to built-in function %qD",
4673 fndecl);
4674 else if (fndecl)
4675 inform (DECL_SOURCE_LOCATION (fndecl),
4676 "in a call to function %qD declared %qs",
4677 fndecl, "nonnull_if_nonzero");
4678 }
4679 }
4680 }
4681 }
4682 if (ranger)
4683 disable_ranger (cfun);
4684 return 0;
4685}
4686
4687} // anon namespace
4688
4689gimple_opt_pass *
4690make_pass_post_ipa_warn (gcc::context *ctxt)
4691{
4692 return new pass_post_ipa_warn (ctxt);
4693}
4694
