Static Value-Flow Analysis
Loading...
Searching...
No Matches
AbstractInterpretation.cpp
Go to the documentation of this file.
1//===- AbstractExecution.cpp -- Abstract Execution---------------------------------//
2//
3// SVF: Static Value-Flow Analysis
4//
5// Copyright (C) <2013-> <Yulei Sui>
6//
7
8// This program is free software: you can redistribute it and/or modify
9// it under the terms of the GNU Affero General Public License as published by
10// the Free Software Foundation, either version 3 of the License, or
11// (at your option) any later version.
12
13// This program is distributed in the hope that it will be useful,
14// but WITHOUT ANY WARRANTY; without even the implied warranty of
15// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16// GNU Affero General Public License for more details.
17
18// You should have received a copy of the GNU Affero General Public License
19// along with this program. If not, see <http://www.gnu.org/licenses/>.
20//
21//===----------------------------------------------------------------------===//
22
23
24//
25// Created on: Jan 10, 2024
26// Author: Xiao Cheng, Jiawei Wang
27//
28
30#include "AE/Svfexe/AbsExtAPI.h"
31#include "SVFIR/SVFIR.h"
32#include "Util/Options.h"
33#include "Util/WorkList.h"
34#include "Graphs/CallGraph.h"
35#include "WPA/Andersen.h"
36#include <cmath>
37#include <deque>
38
39using namespace SVF;
40using namespace SVFUtil;
41
42
44{
45 stat->startClk();
46 icfg = _icfg;
48 // Run Andersen's pointer analysis and build WTO
54
57
60
61 analyse();
63 stat->endClk();
65 if (Options::PStat())
67 for (auto& detector: detectors)
68 detector->reportBug();
69}
70
75
76
85
86
89{
90 delete utils;
91 delete stat;
92 delete preAnalysis;
93 delete svfStateMgr;
94}
95
// Collects program entry functions: call-graph nodes that have a definition
// (declarations are skipped) and no incoming call edges, i.e. functions with
// no callers. If "main" is among them it is pushed to the front of the deque
// so it gets analyzed first; all other callerless functions are appended.
// NOTE(review): the function's signature line was lost in this scraped
// listing — presumably it returns std::deque<const FunObjVar*> (the call
// site at the start of the multi-entry analysis assigns it to that type);
// confirm against the original source.
100{
101 std::deque<const FunObjVar*> entryFunctions;
102
103 for (auto it = callGraph->begin(); it != callGraph->end(); ++it)
104 {
105 const CallGraphNode* cgNode = it->second;
106 const FunObjVar* fun = cgNode->getFunction();
107
108 // Skip declarations
109 if (fun->isDeclaration())
110 continue;
111
112 // Entry points are functions without callers (no incoming edges)
113 if (cgNode->getInEdges().empty())
114 {
115 // If main exists, put it first for priority using deque's push_front
116 if (fun->getName() == "main")
117 {
118 entryFunctions.push_front(fun);
119 }
120 else
121 {
122 entryFunctions.push_back(fun);
123 }
124 }
125 }
126
127 return entryFunctions;
128}
129
130
133{
134 // Always use multi-entry analysis from all entry points
136}
137
142{
143 // Collect all entry point functions
144 std::deque<const FunObjVar*> entryFunctions = collectProgEntryFuns();
145
146 if (entryFunctions.empty())
147 {
148 assert(false && "No entry functions found for analysis");
149 return;
150 }
151 // handle Global ICFGNode of SVFModule
153 for (const FunObjVar* entryFun : entryFunctions)
154 {
157 }
158}
159
166{
167 const ICFGNode* node = icfg->getGlobalICFGNode();
168 // Global init is one of the few legitimate direct-mutation sites:
169 // updateAbsState filters out ValVars in semi-sparse mode, but NullPtr/
170 // BlkPtr have no SVFVar so we cannot route them through updateAbsValue.
171 // Use the manager's operator[] (auto-creates the entry if absent).
172 AbstractState& init = (*svfStateMgr)[node];
173 init = AbstractState();
174 // TODO: we cannot find right SVFVar for NullPtr, so we use init[NullPtr]
175 // directly. Same for BlkPtr below.
177
178 // Global Node, we just need to handle addr, load, store, copy and gep
179 for (const SVFStmt *stmt: node->getSVFStmts())
180 {
182 }
183
184 // BlkPtr represents a pointer whose target is statically unknown (e.g., from
185 // int2ptr casts, external function returns, or unmodeled instructions like
186 // AtomicCmpXchg). It should be an address pointing to the BlackHole object
187 // (ID=2), NOT an interval top.
188 //
189 // History: this was originally set to IntervalValue::top() as a quick fix when
190 // the analysis crashed on programs containing uninitialized BlkPtr. However,
191 // BlkPtr is semantically a *pointer* (address domain), not a numeric value
192 // (interval domain). Setting it to interval top broke cross-domain consistency:
193 // the interval domain and address domain gave contradictory information for the
194 // same variable. The correct representation is an AddressValue containing the
195 // BlackHole virtual address, which means "points to unknown memory".
196 (*svfStateMgr)[node][PAG::getPAG()->getBlkPtr()] = AddressValue(BlackHoleObjAddr);
197}
198
205{
206 // Collect all feasible predecessor states, then merge at the end.
208 bool hasFeasiblePred = false;
209
210 for (auto& edge : node->getInEdges())
211 {
212 const ICFGNode* pred = edge->getSrcNode();
213 if (!hasAbsState(pred))
214 continue;
215
216 if (const IntraCFGEdge* intraCfgEdge = SVFUtil::dyn_cast<IntraCFGEdge>(edge))
217 {
218 if (intraCfgEdge->getCondition())
219 {
222 {
223 merged.joinWith(predState);
224 hasFeasiblePred = true;
225 }
226 }
227 else
228 {
229 merged.joinWith(getAbsState(pred));
230 hasFeasiblePred = true;
231 }
232 }
233 else if (SVFUtil::isa<CallCFGEdge>(edge))
234 {
235 merged.joinWith(getAbsState(pred));
236 hasFeasiblePred = true;
237 }
238 else if (SVFUtil::isa<RetCFGEdge>(edge))
239 {
240 switch (Options::HandleRecur())
241 {
242 case TOP:
243 merged.joinWith(getAbsState(pred));
244 hasFeasiblePred = true;
245 break;
246 case WIDEN_ONLY:
247 case WIDEN_NARROW:
248 {
249 const RetICFGNode* returnSite = SVFUtil::dyn_cast<RetICFGNode>(node);
250 const CallICFGNode* callSite = returnSite->getCallICFGNode();
252 {
253 merged.joinWith(getAbsState(pred));
254 hasFeasiblePred = true;
255 }
256 break;
257 }
258 }
259 }
260 }
261
262 if (!hasFeasiblePred)
263 return false;
264
265 updateAbsState(node, merged);
266
267 return true;
268}
269
270
281static const LoadStmt* findBackingLoad(const SVFVar* var)
282{
283 if (var->getInEdges().empty())
284 return nullptr;
285 SVFStmt* inStmt = *var->getInEdges().begin();
286 if (const LoadStmt* ls = SVFUtil::dyn_cast<LoadStmt>(inStmt))
287 return ls;
288 if (const CopyStmt* cs = SVFUtil::dyn_cast<CopyStmt>(inStmt))
289 {
290 const SVFVar* src = cs->getRHSVar();
291 if (!src->getInEdges().empty())
292 return SVFUtil::dyn_cast<LoadStmt>(*src->getInEdges().begin());
293 }
294 return nullptr;
295}
296
311 bool isLHS, const IntervalValue& self,
312 const IntervalValue& other)
313{
314 // Normalize: always reason from the LHS perspective.
315 // If we are the RHS operand, swap the predicate direction.
316 if (!isLHS)
317 {
318 // a > b from b's perspective: b < a
319 static const Map<s32_t, s32_t> swapPred =
320 {
343 };
344 auto it = swapPred.find(predicate);
345 if (it == swapPred.end()) return IntervalValue::top();
346 predicate = it->second;
347 }
348
349 // If false branch, negate the predicate.
350 if (succ == 0)
351 {
352 static const Map<s32_t, s32_t> negPred =
353 {
376 };
377 auto it = negPred.find(predicate);
378 if (it == negPred.end()) return IntervalValue::top();
379 predicate = it->second;
380 }
381
382 // Now compute the constraint on LHS given: LHS <predicate> other
384 switch (predicate)
385 {
386 case CmpStmt::ICMP_EQ:
390 break;
391 case CmpStmt::ICMP_NE:
396 return IntervalValue::top(); // no useful narrowing
402 break;
408 break;
414 break;
420 break;
421 default:
422 return IntervalValue::top();
423 }
424 return result;
425}
426
429{
430 const ICFGNode* pred = edge->getSrcNode();
431 s64_t succ = edge->getSuccessorCondValue();
432 const CmpStmt* cmpStmt = SVFUtil::cast<CmpStmt>(
433 *edge->getCondition()->getInEdges().begin());
434 s32_t predicate = cmpStmt->getPredicate();
435
436 if (cmpStmt->getOpVarID(0) == IRGraph::NullPtr ||
437 cmpStmt->getOpVarID(1) == IRGraph::NullPtr)
438 return true;
439
441 {
442 getAbsValue(cmpStmt->getOpVar(0), pred),
443 getAbsValue(cmpStmt->getOpVar(1), pred)
444 };
445 if (opVal[0].isAddr() || opVal[1].isAddr())
446 return true;
447
448 // Feasibility check: cmp result must be compatible with branch successor
451 if (resVal.isBottom())
452 return false;
453
454 // For each operand: if it is the variable side (non-constant), has a
455 // backing load, and the branch imposes a useful constraint, narrow the
456 // ObjVar behind the load.
457 for (int i = 0; i < 2; i++)
458 {
459 if (opVal[i].getInterval().is_numeral() || !opVal[1-i].getInterval().is_numeral())
460 continue; // skip constant operands and var-vs-var
461
462 if (const LoadStmt* load = findBackingLoad(cmpStmt->getOpVar(i)))
463 {
465 predicate, succ, i == 0,
466 opVal[i].getInterval(), opVal[1-i].getInterval());
467
468 if (!narrowed.isTop())
469 {
470 const AbstractValue& ptrVal = getAbsValue(load->getRHSVar(), pred);
471 if (ptrVal.isAddr())
472 {
473 for (const auto& addr : ptrVal.getAddrs())
474 {
475 NodeID objId = as.getIDFromAddr(addr);
476 if (as.inAddrToValTable(objId))
477 as.load(addr).meet_with(narrowed);
478 }
479 }
480 }
481 }
482 }
483 return true;
484}
485
488{
489 const ICFGNode* pred = edge->getSrcNode();
490 s64_t succ = edge->getSuccessorCondValue();
491 const SVFVar* var = edge->getCondition();
492
493 if (!as.inVarToValTable(var->getId()) && !as.inVarToAddrsTable(var->getId()))
494 as[var->getId()] = getAbsValue(var, pred);
495 IntervalValue& switch_cond = as[var->getId()].getInterval();
497 if (switch_cond.isBottom())
498 return false;
499
501 for (SVFStmt* stmt : var->getInEdges())
502 stmtList.push(stmt);
503 while (!stmtList.empty())
504 {
505 const SVFStmt* stmt = stmtList.pop();
506 if (const LoadStmt* load = SVFUtil::dyn_cast<LoadStmt>(stmt))
507 {
508 const AbstractValue& ptrVal = getAbsValue(load->getRHSVar(), pred);
509 if (ptrVal.isAddr())
510 {
511 for (const auto& addr : ptrVal.getAddrs())
512 {
513 NodeID objId = as.getIDFromAddr(addr);
514 if (as.inAddrToValTable(objId))
515 as.load(addr).meet_with(switch_cond);
516 }
517 }
518 }
519 }
520 return true;
521}
522
525{
526 const SVFVar* cmpVar = edge->getCondition();
527 assert(!cmpVar->getInEdges().empty() && "branch condition has no defining edge?");
528 if (SVFUtil::isa<CmpStmt>(*cmpVar->getInEdges().begin()))
529 return isCmpBranchFeasible(edge, as);
531}
532
540{
541 // Check reachability: pre-state must have been propagated by predecessors
542 bool isFunEntry = SVFUtil::isa<FunEntryICFGNode>(node);
543 if (!hasAbsState(node))
544 {
545 if (isFunEntry)
546 {
547 // Entry point with no callers: inherit from global node
551 else
553 }
554 else
555 {
556 return false; // unreachable node
557 }
558 }
559
560 // Store the previous state for fixpoint detection
562
563 stat->getBlockTrace()++;
565
566 // Handle SVF statements
567 for (const SVFStmt *stmt: node->getSVFStmts())
568 {
570 }
571
572 // Handle call sites
573 if (const CallICFGNode* callNode = SVFUtil::dyn_cast<CallICFGNode>(node))
574 {
576 }
577
578 // Run detectors
579 for (auto& detector: detectors)
580 detector->detect(node);
582
583 // Track this node as analyzed (for coverage statistics across all entry points)
584 allAnalyzedNodes.insert(node);
585
586 if (getAbsState(node) == prevState)
587 return false;
588
589 return true;
590}
591
599{
600 auto it = preAnalysis->getFuncToWTO().find(funEntry->getFun());
601 if (it == preAnalysis->getFuncToWTO().end())
602 return;
603
604 // Push all top-level WTO components into the worklist in WTO order
605 FIFOWorkList<const ICFGWTOComp*> worklist(it->second->getWTOComponents());
606
607 while (!worklist.empty())
608 {
609 const ICFGWTOComp* comp = worklist.pop();
610
611 if (const ICFGSingletonWTO* singleton = SVFUtil::dyn_cast<ICFGSingletonWTO>(comp))
612 {
613 const ICFGNode* node = singleton->getICFGNode();
615 handleICFGNode(node);
616 }
617 else if (const ICFGCycleWTO* cycle = SVFUtil::dyn_cast<ICFGCycleWTO>(comp))
618 {
619 if (mergeStatesFromPredecessors(cycle->head()->getICFGNode()))
621 }
622 }
623}
624
625
627{
628 if (const CallICFGNode* callNode = SVFUtil::dyn_cast<CallICFGNode>(node))
629 {
630 if (isExtCall(callNode))
631 {
633 }
634 else
635 {
636 // Handle both direct and indirect calls uniformly
638 }
639 }
640 else
641 assert (false && "it is not call node");
642}
643
// Returns whether the call site targets an external (library/unmodeled)
// function, by delegating to SVFUtil::isExtCall on the resolved callee.
// NOTE(review): signature line lost in this scraped listing; the doxygen
// index suggests the parameter is a const CallICFGNode* — confirm.
645{
646 return SVFUtil::isExtCall(callNode->getCalledFunction());
647}
648
650{
652 for (auto& detector : detectors)
653 {
654 detector->handleStubFunctions(callNode);
655 }
656}
657
663
667{
668 const RetICFGNode *retNode = callNode->getRetICFGNode();
669
670 // 1. Set return value to TOP
671 if (retNode->getSVFStmts().size() > 0)
672 {
673 if (const RetPE *retPE = SVFUtil::dyn_cast<RetPE>(*retNode->getSVFStmts().begin()))
674 {
675 if (!retPE->getLHSVar()->isPointer() &&
676 !retPE->getLHSVar()->isConstDataOrAggDataButNotNullPtr())
677 updateAbsValue(retPE->getLHSVar(), IntervalValue::top(), callNode);
678 }
679 }
680
681 // 2. Set all stores in callee's reachable BBs to TOP
682 if (retNode->getOutEdges().size() > 1)
683 {
685 return;
686 }
687 for (const SVFBasicBlock* bb : callNode->getCalledFunction()->getReachableBBs())
688 {
689 for (const ICFGNode* node : bb->getICFGNodeList())
690 {
691 for (const SVFStmt* stmt : node->getSVFStmts())
692 {
693 if (const StoreStmt* store = SVFUtil::dyn_cast<StoreStmt>(stmt))
694 {
695 const SVFVar* rhsVar = store->getRHSVar();
696 if (!rhsVar->isPointer() && !rhsVar->isConstDataOrAggDataButNotNullPtr())
697 {
698 const AbstractValue& addrs = getAbsValue(store->getLHSVar(), callNode);
699 if (addrs.isAddr())
700 {
702 for (const auto& addr : addrs.getAddrs())
704 }
705 }
706 }
707 }
708 }
709 }
710
711 // 3. Copy callNode's state to retNode
713}
714
722
725{
726 // Direct call: get callee directly from call node
727 if (const FunObjVar* callee = callNode->getCalledFunction())
728 return callee;
729
730 // Indirect call: resolve callee through pointer analysis
732 auto it = callsiteMaps.find(callNode);
733 if (it == callsiteMaps.end())
734 return nullptr;
735
736 NodeID call_id = it->second;
737 if (!hasAbsState(callNode))
738 return nullptr;
739
741 if (!Addrs.isAddr() || Addrs.getAddrs().empty())
742 return nullptr;
743
744 NodeID addr = *Addrs.getAddrs().begin();
745 const SVFVar* func_var = getSVFVar(getAbsState(callNode).getIDFromAddr(addr));
746 return SVFUtil::dyn_cast<FunObjVar>(func_var);
747}
748
751{
753 if (!callee)
754 return false;
755
756 // Non-recursive function: never skip, always inline
758 return false;
759
760 // For recursive functions, skip only recursive callsites (within same SCC).
761 // Entry calls (from outside SCC) are not skipped - they are inlined so that
762 // handleLoopOrRecursion() can analyze the function body.
763 // This applies uniformly to all modes (TOP/WIDEN_ONLY/WIDEN_NARROW).
765}
766
// Decides whether the narrowing phase should run for the cycle belonging to
// the given function. Regular loops (non-recursive functions) always narrow;
// for recursive functions the decision depends on Options::HandleRecur():
// WIDEN_NARROW narrows, WIDEN_ONLY does not, and TOP is unreachable here
// because handleLoopOrRecursion exits early in TOP mode (asserted below).
// NOTE(review): signature line lost in this scraped listing; presumably
// bool(const FunObjVar* fun) given the isRecursiveFun(fun) call — confirm.
769{
770 // Non-recursive functions (regular loops): always apply narrowing
771 if (!isRecursiveFun(fun))
772 return true;
773
774 // Recursive functions: WIDEN_NARROW applies narrowing, WIDEN_ONLY does not
775 // TOP mode exits early in handleLoopOrRecursion, so should not reach here
776 switch (Options::HandleRecur())
777 {
778 case TOP:
779 assert(false && "TOP mode should not reach narrowing phase for recursive functions");
780 return false;
781 case WIDEN_ONLY:
782 return false; // Skip narrowing for recursive functions
783 case WIDEN_NARROW:
784 return true; // Apply narrowing for recursive functions
785 default:
786 assert(false && "Unknown recursion handling mode");
787 return false;
788 }
789}
800{
802 return;
803
804 // Direct call: callee is known
805 if (const FunObjVar* callee = callNode->getCalledFunction())
806 {
809 const RetICFGNode* retNode = callNode->getRetICFGNode();
811 return;
812 }
813
814 // Indirect call: use Andersen's call graph to get all resolved callees.
815 const RetICFGNode* retNode = callNode->getRetICFGNode();
817 {
819 for (const FunObjVar* callee : callees)
820 {
821 if (callee->isDeclaration())
822 continue;
825 }
826 }
827 // Resume return node from caller's state (context-insensitive)
829}
830
871
872// ---------------------------------------------------------------------------
873// Semi-sparse cycle helpers
874// ---------------------------------------------------------------------------
875//
876// In semi-sparse mode, ValVars live at their def-sites and do not flow
877// through the cycle-head merge. The around-merge widening at cycle_head
878// therefore cannot observe ValVar growth across iterations (e.g. an
879// ArgValVar that a recursive CallPE keeps overwriting at the FunEntry).
880//
881// To fix that, handleLoopOrRecursion runs an extra cross-iter widening on
882// a snapshot that pulls every cycle ValVar into cycle_head's state. The set
883// of ValVar IDs per cycle is precomputed once after PreAnalysis::initWTO()
884// (initCycleValVars), bottom-up so nested cycles are handled before their
885// enclosing cycle.
886
887// --- Cycle state helpers (dense/sparse unified) ---
888
890{
891 const ICFGNode* cycle_head = cycle->head()->getICFGNode();
895
897 {
899 if (valVars.empty())
900 return snap; // dense path / no cycle ValVars: snap is already complete
901
902 // Semi-sparse: drop any stale ValVar entries cached at cycle_head and
903 // pull each cycle ValVar from its def-site. ValVars without a stored
904 // value are skipped to avoid the top-fallback contamination.
905 snap.clearValVars();
906 for (const ValVar* v : valVars)
907 {
908 const ICFGNode* defSite = v->getICFGNode();
909 if (!defSite || !hasAbsValue(v, defSite)) continue;
910 snap[v->getId()] = getAbsValue(v, defSite);
911 }
912 return snap;
913 }
914 else
915 {
916 return snap;
917 }
918}
919
920
922 const AbstractState& prev, const AbstractState& cur, const ICFGCycleWTO* cycle)
923{
926 // Always write back (even at fixpoint) so cycle_head's trace holds the
927 // widened state for the upcoming narrowing phase.
928 const ICFGNode* cycle_head = cycle->head()->getICFGNode();
931 {
932 for (const auto& [id, val] : next.getVarToVal())
933 {
935 }
936 }
937 return next == prev;
938}
939
941 const AbstractState& prev, const AbstractState& cur, const ICFGCycleWTO* cycle)
942{
943 const ICFGNode* cycle_head = cycle->head()->getICFGNode();
944 if (!shouldApplyNarrowing(cycle_head->getFun()))
945 return true;
948 if (next == prev)
949 return true; // fixpoint
952 {
953 for (const auto& [id, val] : next.getVarToVal())
954 {
956 }
957 }
958 return false;
959}
960
961
963{
964 const ICFGNode* cycle_head = cycle->head()->getICFGNode();
965
966 // TOP mode for recursive function cycles: set all stores and return value to TOP
967 if (Options::HandleRecur() == TOP && isRecursiveFun(cycle_head->getFun()))
968 {
969 if (caller)
971 return;
972 }
973
974 // Iterate until fixpoint with widening/narrowing on the cycle head.
975 bool increasing = true;
977 for (u32_t cur_iter = 0;; cur_iter++)
978 {
979 if (cur_iter >= widen_delay)
980 {
981 // getFullCycleHeadState handles dense (returns trace[cycle_head])
982 // and semi-sparse (collects ValVars from def-sites) uniformly.
984
988
989 if (increasing)
990 {
991 if (widenCycleState(prev, cur, cycle))
992 {
993 increasing = false;
994 continue;
995 }
996 }
997 else
998 {
999 if (narrowCycleState(prev, cur, cycle))
1000 break;
1001 }
1002 }
1003 else
1004 {
1005 // Before widen_delay: process cycle head with gated pattern
1008 }
1009
1010 // Process cycle body components (each with gated merge+handle)
1011 for (const ICFGWTOComp* comp : cycle->getWTOComponents())
1012 {
1013 if (const ICFGSingletonWTO* singleton = SVFUtil::dyn_cast<ICFGSingletonWTO>(comp))
1014 {
1015 const ICFGNode* node = singleton->getICFGNode();
1017 handleICFGNode(node);
1018 }
1019 else if (const ICFGCycleWTO* subCycle = SVFUtil::dyn_cast<ICFGCycleWTO>(comp))
1020 {
1021 if (mergeStatesFromPredecessors(subCycle->head()->getICFGNode()))
1023 }
1024 }
1025 }
1026}
1027
1029{
1030 if (const AddrStmt *addr = SVFUtil::dyn_cast<AddrStmt>(stmt))
1031 {
1033 }
1034 else if (const BinaryOPStmt *binary = SVFUtil::dyn_cast<BinaryOPStmt>(stmt))
1035 {
1037 }
1038 else if (const CmpStmt *cmp = SVFUtil::dyn_cast<CmpStmt>(stmt))
1039 {
1041 }
1042 else if (SVFUtil::isa<UnaryOPStmt>(stmt))
1043 {
1044 }
1045 else if (SVFUtil::isa<BranchStmt>(stmt))
1046 {
1047 // branch stmt is handled in hasBranchES
1048 }
1049 else if (const LoadStmt *load = SVFUtil::dyn_cast<LoadStmt>(stmt))
1050 {
1051 updateStateOnLoad(load);
1052 }
1053 else if (const StoreStmt *store = SVFUtil::dyn_cast<StoreStmt>(stmt))
1054 {
1055 updateStateOnStore(store);
1056 }
1057 else if (const CopyStmt *copy = SVFUtil::dyn_cast<CopyStmt>(stmt))
1058 {
1060 }
1061 else if (const GepStmt *gep = SVFUtil::dyn_cast<GepStmt>(stmt))
1062 {
1064 }
1065 else if (const SelectStmt *select = SVFUtil::dyn_cast<SelectStmt>(stmt))
1066 {
1068 }
1069 else if (const PhiStmt *phi = SVFUtil::dyn_cast<PhiStmt>(stmt))
1070 {
1072 }
1073 else if (const CallPE *callPE = SVFUtil::dyn_cast<CallPE>(stmt))
1074 {
1075 // To handle Call Edge
1076 updateStateOnCall(callPE);
1077 }
1078 else if (const RetPE *retPE = SVFUtil::dyn_cast<RetPE>(stmt))
1079 {
1080 updateStateOnRet(retPE);
1081 }
1082 else
1083 assert(false && "implement this part");
1084 // NullPtr should not be changed by any statement. If the entry is missing
1085 // (not yet auto-inserted) we treat that as "unchanged" — only check the
1086 // entry if it actually exists.
1087 {
1088 const auto& vmap = getAbsState(stmt->getICFGNode()).getVarToVal();
1089 auto it = vmap.find(IRGraph::NullPtr);
1090 assert(it == vmap.end() ||
1091 (!it->second.isInterval() && !it->second.isAddr()));
1092 }
1093}
1094
1096{
1097 const ICFGNode* node = gep->getICFGNode();
1099 AddressValue gepAddrs = svfStateMgr->getGepObjAddrs(SVFUtil::cast<ValVar>(gep->getRHSVar()), offsetPair);
1100 updateAbsValue(gep->getLHSVar(), gepAddrs, node);
1101}
1102
1104{
1105 const ICFGNode* node = select->getICFGNode();
1106 const AbstractValue& condVal = getAbsValue(select->getCondition(), node);
1107 const AbstractValue& tVal = getAbsValue(select->getTrueValue(), node);
1108 const AbstractValue& fVal = getAbsValue(select->getFalseValue(), node);
1110 if (condVal.getInterval().is_numeral())
1111 {
1113 }
1114 else
1115 {
1116 resVal = tVal;
1117 resVal.join_with(fVal);
1118 }
1119 updateAbsValue(select->getRes(), resVal, node);
1120}
1121
1123{
1124 const ICFGNode* icfgNode = phi->getICFGNode();
1126 for (u32_t i = 0; i < phi->getOpVarNum(); i++)
1127 {
1128 const ICFGNode* opICFGNode = phi->getOpICFGNode(i);
1130 {
1132 const AbstractValue& opVal = getAbsValue(phi->getOpVar(i), opICFGNode);
1134 if (edge)
1135 {
1136 const IntraCFGEdge* intraEdge = SVFUtil::cast<IntraCFGEdge>(edge);
1137 if (intraEdge->getCondition())
1138 {
1140 rhs.join_with(opVal);
1141 }
1142 else
1143 rhs.join_with(opVal);
1144 }
1145 else
1146 {
1147 rhs.join_with(opVal);
1148 }
1149 }
1150 }
1151 updateAbsValue(phi->getRes(), rhs, icfgNode);
1152}
1153
1154
1158{
1159 const ICFGNode* node = callPE->getICFGNode();
1160 const SVFVar* res = callPE->getRes();
1162 for (u32_t i = 0; i < callPE->getOpVarNum(); i++)
1163 {
1164 const ICFGNode* opICFGNode = callPE->getOpCallICFGNode(i);
1166 {
1167 const AbstractValue& opVal = getAbsValue(callPE->getOpVar(i), opICFGNode);
1168 rhs.join_with(opVal);
1169 }
1170 }
1171 updateAbsValue(res, rhs, node);
1172}
1173
// Handles a RetPE (return parameter edge): propagates the callee's return
// value (RHS) into the caller-side receiving variable (LHS), both evaluated
// at the RetPE's owning ICFG node.
1175{
1176 const ICFGNode* node = retPE->getICFGNode();
1177 const AbstractValue& rhsVal = getAbsValue(retPE->getRHSVar(), node);
1178 updateAbsValue(retPE->getLHSVar(), rhsVal, node);
1179}
1180
1181
1183{
1184 const ICFGNode* node = addr->getICFGNode();
1185 // initObjVar mutates _varToAbsVal/_addrToAbsVal directly, so we need
1186 // mutable access; route via the manager.
1188 as.initObjVar(SVFUtil::cast<ObjVar>(addr->getRHSVar()));
1189 // AddrStmt: lhs(ValVar) = &rhs(ObjVar).
1190 // as[rhsId] stores the ObjVar's virtual address in _varToVal,
1191 // NOT the object contents. So we must use as[] directly for ObjVar.
1192 u32_t rhsId = addr->getRHSVarID();
1193 if (addr->getRHSVar()->getType()->getKind() == SVFType::SVFIntegerTy)
1194 as[rhsId].getInterval().meet_with(utils->getRangeLimitFromType(addr->getRHSVar()->getType()));
1195 // LHS is a ValVar (pointer), write through the API
1196 updateAbsValue(addr->getLHSVar(), as[rhsId], node);
1197}
1198
1199
1201{
1202 const ICFGNode* node = binary->getICFGNode();
1203 // Treat bottom (uninitialized) operands as top for soundness
1204 const AbstractValue& op0Val = getAbsValue(binary->getOpVar(0), node);
1205 const AbstractValue& op1Val = getAbsValue(binary->getOpVar(1), node);
1206 IntervalValue lhs = op0Val.getInterval().isBottom() ? IntervalValue::top() : op0Val.getInterval();
1207 IntervalValue rhs = op1Val.getInterval().isBottom() ? IntervalValue::top() : op1Val.getInterval();
1209 switch (binary->getOpcode())
1210 {
1211 case BinaryOPStmt::Add:
1212 case BinaryOPStmt::FAdd:
1213 resVal = (lhs + rhs);
1214 break;
1215 case BinaryOPStmt::Sub:
1216 case BinaryOPStmt::FSub:
1217 resVal = (lhs - rhs);
1218 break;
1219 case BinaryOPStmt::Mul:
1220 case BinaryOPStmt::FMul:
1221 resVal = (lhs * rhs);
1222 break;
1223 case BinaryOPStmt::SDiv:
1224 case BinaryOPStmt::FDiv:
1225 case BinaryOPStmt::UDiv:
1226 resVal = (lhs / rhs);
1227 break;
1228 case BinaryOPStmt::SRem:
1229 case BinaryOPStmt::FRem:
1230 case BinaryOPStmt::URem:
1231 resVal = (lhs % rhs);
1232 break;
1233 case BinaryOPStmt::Xor:
1234 resVal = (lhs ^ rhs);
1235 break;
1236 case BinaryOPStmt::And:
1237 resVal = (lhs & rhs);
1238 break;
1239 case BinaryOPStmt::Or:
1240 resVal = (lhs | rhs);
1241 break;
1242 case BinaryOPStmt::AShr:
1243 resVal = (lhs >> rhs);
1244 break;
1245 case BinaryOPStmt::Shl:
1246 resVal = (lhs << rhs);
1247 break;
1248 case BinaryOPStmt::LShr:
1249 resVal = (lhs >> rhs);
1250 break;
1251 default:
1252 assert(false && "undefined binary: ");
1253 }
1254 updateAbsValue(binary->getRes(), resVal, node);
1255}
1256
1258{
1259 const ICFGNode* node = cmp->getICFGNode();
1260 u32_t op0 = cmp->getOpVarID(0);
1261 u32_t op1 = cmp->getOpVarID(1);
1262 const AbstractValue& op0Val = getAbsValue(cmp->getOpVar(0), node);
1263 const AbstractValue& op1Val = getAbsValue(cmp->getOpVar(1), node);
1264
1265 // if it is address
1266 if (op0Val.isAddr() && op1Val.isAddr())
1267 {
1269 const AddressValue& addrOp0 = op0Val.getAddrs();
1270 const AddressValue& addrOp1 = op1Val.getAddrs();
1271 if (addrOp0.equals(addrOp1))
1272 {
1273 resVal = IntervalValue(1, 1);
1274 }
1275 else if (addrOp0.hasIntersect(addrOp1))
1276 {
1277 resVal = IntervalValue(0, 1);
1278 }
1279 else
1280 {
1281 resVal = IntervalValue(0, 0);
1282 }
1283 updateAbsValue(cmp->getRes(), resVal, node);
1284 }
1285 // if op0 or op1 is nullptr, compare abstractValue instead of touching addr or interval
1286 else if (op0 == IRGraph::NullPtr || op1 == IRGraph::NullPtr)
1287 {
1288 IntervalValue resVal = (op0Val.equals(op1Val)) ? IntervalValue(1, 1) : IntervalValue(0, 0);
1289 updateAbsValue(cmp->getRes(), resVal, node);
1290 }
1291 else
1292 {
1293 {
1295 if (op0Val.isInterval() && op1Val.isInterval())
1296 {
1297 // Treat bottom (uninitialized) operands as top for soundness
1298 IntervalValue lhs = op0Val.getInterval().isBottom() ? IntervalValue::top() : op0Val.getInterval(),
1299 rhs = op1Val.getInterval().isBottom() ? IntervalValue::top() : op1Val.getInterval();
1300 // AbstractValue
1301 auto predicate = cmp->getPredicate();
1302 switch (predicate)
1303 {
1304 case CmpStmt::ICMP_EQ:
1305 case CmpStmt::FCMP_OEQ:
1306 case CmpStmt::FCMP_UEQ:
1307 resVal = (lhs == rhs);
1308 // resVal = (lhs.getInterval() == rhs.getInterval());
1309 break;
1310 case CmpStmt::ICMP_NE:
1311 case CmpStmt::FCMP_ONE:
1312 case CmpStmt::FCMP_UNE:
1313 resVal = (lhs != rhs);
1314 break;
1315 case CmpStmt::ICMP_UGT:
1316 case CmpStmt::ICMP_SGT:
1317 case CmpStmt::FCMP_OGT:
1318 case CmpStmt::FCMP_UGT:
1319 resVal = (lhs > rhs);
1320 break;
1321 case CmpStmt::ICMP_UGE:
1322 case CmpStmt::ICMP_SGE:
1323 case CmpStmt::FCMP_OGE:
1324 case CmpStmt::FCMP_UGE:
1325 resVal = (lhs >= rhs);
1326 break;
1327 case CmpStmt::ICMP_ULT:
1328 case CmpStmt::ICMP_SLT:
1329 case CmpStmt::FCMP_OLT:
1330 case CmpStmt::FCMP_ULT:
1331 resVal = (lhs < rhs);
1332 break;
1333 case CmpStmt::ICMP_ULE:
1334 case CmpStmt::ICMP_SLE:
1335 case CmpStmt::FCMP_OLE:
1336 case CmpStmt::FCMP_ULE:
1337 resVal = (lhs <= rhs);
1338 break;
1340 resVal = IntervalValue(0, 0);
1341 break;
1342 case CmpStmt::FCMP_TRUE:
1343 resVal = IntervalValue(1, 1);
1344 break;
1345 case CmpStmt::FCMP_ORD:
1346 case CmpStmt::FCMP_UNO:
1347 // FCMP_ORD: true if both operands are not NaN
1348 // FCMP_UNO: true if either operand is NaN
1349 // Conservatively return [0, 1] since we don't track NaN
1350 resVal = IntervalValue(0, 1);
1351 break;
1352 default:
1353 assert(false && "undefined compare: ");
1354 }
1355 updateAbsValue(cmp->getRes(), resVal, node);
1356 }
1357 else if (op0Val.isAddr() && op1Val.isAddr())
1358 {
1359 const AddressValue& lhs = op0Val.getAddrs();
1360 const AddressValue& rhs = op1Val.getAddrs();
1361 auto predicate = cmp->getPredicate();
1362 switch (predicate)
1363 {
1364 case CmpStmt::ICMP_EQ:
1365 case CmpStmt::FCMP_OEQ:
1366 case CmpStmt::FCMP_UEQ:
1367 {
1368 if (lhs.hasIntersect(rhs))
1369 {
1370 resVal = IntervalValue(0, 1);
1371 }
1372 else if (lhs.empty() && rhs.empty())
1373 {
1374 resVal = IntervalValue(1, 1);
1375 }
1376 else
1377 {
1378 resVal = IntervalValue(0, 0);
1379 }
1380 break;
1381 }
1382 case CmpStmt::ICMP_NE:
1383 case CmpStmt::FCMP_ONE:
1384 case CmpStmt::FCMP_UNE:
1385 {
1386 if (lhs.hasIntersect(rhs))
1387 {
1388 resVal = IntervalValue(0, 1);
1389 }
1390 else if (lhs.empty() && rhs.empty())
1391 {
1392 resVal = IntervalValue(0, 0);
1393 }
1394 else
1395 {
1396 resVal = IntervalValue(1, 1);
1397 }
1398 break;
1399 }
1400 case CmpStmt::ICMP_UGT:
1401 case CmpStmt::ICMP_SGT:
1402 case CmpStmt::FCMP_OGT:
1403 case CmpStmt::FCMP_UGT:
1404 {
1405 if (lhs.size() == 1 && rhs.size() == 1)
1406 {
1407 resVal = IntervalValue(*lhs.begin() > *rhs.begin());
1408 }
1409 else
1410 {
1411 resVal = IntervalValue(0, 1);
1412 }
1413 break;
1414 }
1415 case CmpStmt::ICMP_UGE:
1416 case CmpStmt::ICMP_SGE:
1417 case CmpStmt::FCMP_OGE:
1418 case CmpStmt::FCMP_UGE:
1419 {
1420 if (lhs.size() == 1 && rhs.size() == 1)
1421 {
1422 resVal = IntervalValue(*lhs.begin() >= *rhs.begin());
1423 }
1424 else
1425 {
1426 resVal = IntervalValue(0, 1);
1427 }
1428 break;
1429 }
1430 case CmpStmt::ICMP_ULT:
1431 case CmpStmt::ICMP_SLT:
1432 case CmpStmt::FCMP_OLT:
1433 case CmpStmt::FCMP_ULT:
1434 {
1435 if (lhs.size() == 1 && rhs.size() == 1)
1436 {
1437 resVal = IntervalValue(*lhs.begin() < *rhs.begin());
1438 }
1439 else
1440 {
1441 resVal = IntervalValue(0, 1);
1442 }
1443 break;
1444 }
1445 case CmpStmt::ICMP_ULE:
1446 case CmpStmt::ICMP_SLE:
1447 case CmpStmt::FCMP_OLE:
1448 case CmpStmt::FCMP_ULE:
1449 {
1450 if (lhs.size() == 1 && rhs.size() == 1)
1451 {
1452 resVal = IntervalValue(*lhs.begin() <= *rhs.begin());
1453 }
1454 else
1455 {
1456 resVal = IntervalValue(0, 1);
1457 }
1458 break;
1459 }
1461 resVal = IntervalValue(0, 0);
1462 break;
1463 case CmpStmt::FCMP_TRUE:
1464 resVal = IntervalValue(1, 1);
1465 break;
1466 case CmpStmt::FCMP_ORD:
1467 case CmpStmt::FCMP_UNO:
1468 // FCMP_ORD: true if both operands are not NaN
1469 // FCMP_UNO: true if either operand is NaN
1470 // Conservatively return [0, 1] since we don't track NaN
1471 resVal = IntervalValue(0, 1);
1472 break;
1473 default:
1474 assert(false && "undefined compare: ");
1475 }
1476 updateAbsValue(cmp->getRes(), resVal, node);
1477 }
1478 }
1479 }
1480}
1481
// Handles a LoadStmt: dereferences the pointer operand (RHS, cast to ValVar)
// via the state manager's loadValue at this ICFG node, and writes the loaded
// abstract value into the LHS variable.
1483{
1484 const ICFGNode* node = load->getICFGNode();
1485 updateAbsValue(load->getLHSVar(),
1486 svfStateMgr->loadValue(SVFUtil::cast<ValVar>(load->getRHSVar()), node), node);
1487}
1488
// Handles a StoreStmt: evaluates the stored value (RHS) at this ICFG node and
// writes it through the pointer operand (LHS, cast to ValVar) via the state
// manager's storeValue.
1490{
1491 const ICFGNode* node = store->getICFGNode();
1492 svfStateMgr->storeValue(SVFUtil::cast<ValVar>(store->getLHSVar()),
1493 getAbsValue(store->getRHSVar(), node), node);
1494}
1495
1497{
1498 const ICFGNode* node = copy->getICFGNode();
1499 const SVFVar* lhsVar = copy->getLHSVar();
1500 const SVFVar* rhsVar = copy->getRHSVar();
1501
1502 auto getZExtValue = [&](const SVFVar* var)
1503 {
1504 const SVFType* type = var->getType();
1505 if (SVFUtil::isa<SVFIntegerType>(type))
1506 {
1507 u32_t bits = type->getByteSize() * 8;
1508 const AbstractValue& val = getAbsValue(var, node);
1509 if (val.getInterval().is_numeral())
1510 {
1511 if (bits == 8)
1512 {
1513 int8_t signed_i8_value = val.getInterval().getIntNumeral();
1516 }
1517 else if (bits == 16)
1518 {
1519 s16_t signed_i16_value = val.getInterval().getIntNumeral();
1522 }
1523 else if (bits == 32)
1524 {
1525 s32_t signed_i32_value = val.getInterval().getIntNumeral();
1528 }
1529 else if (bits == 64)
1530 {
1531 s64_t signed_i64_value = val.getInterval().getIntNumeral();
1533 }
1534 else
1535 assert(false && "cannot support int type other than u8/16/32/64");
1536 }
1537 else
1538 {
1539 return IntervalValue::top();
1540 }
1541 }
1542 return IntervalValue::top();
1543 };
1544
1545 auto getTruncValue = [&](const SVFVar* var, const SVFType* dstType)
1546 {
1547 const IntervalValue& itv = getAbsValue(var, node).getInterval();
1548 if(itv.isBottom()) return itv;
1550 s64_t int_ub = itv.ub().getIntNumeral();
1551 u32_t dst_bits = dstType->getByteSize() * 8;
1552 if (dst_bits == 8)
1553 {
1554 int8_t s8_lb = static_cast<int8_t>(int_lb);
1555 int8_t s8_ub = static_cast<int8_t>(int_ub);
1556 if (s8_lb > s8_ub)
1558 return IntervalValue(s8_lb, s8_ub);
1559 }
1560 else if (dst_bits == 16)
1561 {
1562 s16_t s16_lb = static_cast<s16_t>(int_lb);
1563 s16_t s16_ub = static_cast<s16_t>(int_ub);
1564 if (s16_lb > s16_ub)
1566 return IntervalValue(s16_lb, s16_ub);
1567 }
1568 else if (dst_bits == 32)
1569 {
1570 s32_t s32_lb = static_cast<s32_t>(int_lb);
1571 s32_t s32_ub = static_cast<s32_t>(int_ub);
1572 if (s32_lb > s32_ub)
1574 return IntervalValue(s32_lb, s32_ub);
1575 }
1576 else
1577 {
1578 assert(false && "cannot support dst int type other than u8/16/32");
1579 abort();
1580 }
1581 };
1582
1583 const AbstractValue& rhsVal = getAbsValue(rhsVar, node);
1584
1585 if (copy->getCopyKind() == CopyStmt::COPYVAL)
1586 {
1588 }
1589 else if (copy->getCopyKind() == CopyStmt::ZEXT)
1590 {
1591 updateAbsValue(lhsVar, getZExtValue(rhsVar), node);
1592 }
1593 else if (copy->getCopyKind() == CopyStmt::SEXT)
1594 {
1595 updateAbsValue(lhsVar, rhsVal.getInterval(), node);
1596 }
1597 else if (copy->getCopyKind() == CopyStmt::FPTOSI)
1598 {
1599 updateAbsValue(lhsVar, rhsVal.getInterval(), node);
1600 }
1601 else if (copy->getCopyKind() == CopyStmt::FPTOUI)
1602 {
1603 updateAbsValue(lhsVar, rhsVal.getInterval(), node);
1604 }
1605 else if (copy->getCopyKind() == CopyStmt::SITOFP)
1606 {
1607 updateAbsValue(lhsVar, rhsVal.getInterval(), node);
1608 }
1609 else if (copy->getCopyKind() == CopyStmt::UITOFP)
1610 {
1611 updateAbsValue(lhsVar, rhsVal.getInterval(), node);
1612 }
1613 else if (copy->getCopyKind() == CopyStmt::TRUNC)
1614 {
1615 updateAbsValue(lhsVar, getTruncValue(rhsVar, lhsVar->getType()), node);
1616 }
1617 else if (copy->getCopyKind() == CopyStmt::FPTRUNC)
1618 {
1619 updateAbsValue(lhsVar, rhsVal.getInterval(), node);
1620 }
1621 else if (copy->getCopyKind() == CopyStmt::INTTOPTR)
1622 {
1623 //insert nullptr
1624 }
1625 else if (copy->getCopyKind() == CopyStmt::PTRTOINT)
1626 {
1628 }
1629 else if (copy->getCopyKind() == CopyStmt::BITCAST)
1630 {
1631 if (rhsVal.isAddr())
1633 }
1634 else
1635 assert(false && "undefined copy kind");
1636}
1637
1638
1639
static const LoadStmt * findBackingLoad(const SVFVar *var)
static IntervalValue computeCmpConstraint(s32_t predicate, s64_t succ, bool isLHS, const IntervalValue &self, const IntervalValue &other)
#define BlackHoleObjAddr
item next
Definition cJSON.cpp:2224
newitem type
Definition cJSON.cpp:2739
copy
Definition cJSON.cpp:414
newitem prev
Definition cJSON.cpp:2285
void finializeStat()
Definition AEStat.cpp:44
u32_t & getBlockTrace()
Definition AEStat.h:70
void performStat() override
Definition AEStat.cpp:121
u32_t & getICFGNodeTrace()
Definition AEStat.h:78
void countStateSize()
Definition AEStat.cpp:31
Handles external API calls and manages abstract states.
Definition AbsExtAPI.h:44
void collectCheckPoint()
void handleExtAPI(const CallICFGNode *call)
Handles an external API call.
void checkPointAllSet()
IntervalValue getRangeLimitFromType(const SVFType *type)
Gets the range limit from a type.
void updateStateOnCall(const CallPE *callPE)
const FunObjVar * getCallee(const CallICFGNode *callNode)
Get callee function: directly for direct calls, via pointer analysis for indirect calls.
bool isBranchFeasible(const IntraCFGEdge *edge, AbstractState &as)
Returns true if the branch is reachable; narrows as in-place.
void updateStateOnStore(const StoreStmt *store)
bool hasAbsState(const ICFGNode *node)
bool narrowCycleState(const AbstractState &prev, const AbstractState &cur, const ICFGCycleWTO *cycle)
Narrow prev with cur; write the narrowed state back and scatter.
virtual void handleFunCall(const CallICFGNode *callNode)
void updateAbsValue(const SVFVar *var, const AbstractValue &val, const ICFGNode *node)
void analyzeFromAllProgEntries()
Analyze all entry points (functions without callers)
void updateStateOnGep(const GepStmt *gep)
virtual void handleGlobalNode()
Initialize abstract state for the global ICFG node and process global statements.
void handleFunction(const ICFGNode *funEntry, const CallICFGNode *caller=nullptr)
Handle a function body via worklist-driven WTO traversal starting from funEntry.
virtual bool isExtCall(const CallICFGNode *callNode)
bool isSwitchBranchFeasible(const IntraCFGEdge *edge, AbstractState &as)
Returns true if the switch branch is feasible; narrows as in-place.
AbstractState getFullCycleHeadState(const ICFGCycleWTO *cycle)
void updateStateOnPhi(const PhiStmt *phi)
bool handleICFGNode(const ICFGNode *node)
Handle an ICFG node: execute statements; return true if state changed.
std::vector< std::unique_ptr< AEDetector > > detectors
virtual void handleExtCall(const CallICFGNode *callNode)
virtual void skipRecursionWithTop(const CallICFGNode *callNode)
bool widenCycleState(const AbstractState &prev, const AbstractState &cur, const ICFGCycleWTO *cycle)
const AbstractValue & getAbsValue(const SVFVar *var, const ICFGNode *node)
bool mergeStatesFromPredecessors(const ICFGNode *node)
void updateStateOnSelect(const SelectStmt *select)
virtual void handleSVFStatement(const SVFStmt *stmt)
Dispatch an SVF statement (Addr/Binary/Cmp/Load/Store/Copy/Gep/Select/Phi/Call/Ret) to its handler.
bool skipRecursiveCall(const CallICFGNode *callNode)
Skip recursive callsites (within SCC); entry calls from outside SCC are not skipped.
SVFIR * svfir
protected data members, also used in subclasses
virtual void runOnModule(ICFG *icfg)
virtual bool isRecursiveCallSite(const CallICFGNode *callNode, const FunObjVar *)
Check if caller and callee are in the same CallGraph SCC (i.e. a recursive callsite)
bool shouldApplyNarrowing(const FunObjVar *fun)
Check if narrowing should be applied: always for regular loops, mode-dependent for recursion.
virtual bool isRecursiveFun(const FunObjVar *fun)
Check if a function is recursive (part of a call graph SCC)
void updateStateOnAddr(const AddrStmt *addr)
virtual ~AbstractInterpretation()
Destructor.
bool hasAbsValue(const SVFVar *var, const ICFGNode *node)
virtual void handleCallSite(const ICFGNode *node)
Handle a call site node: dispatch to ext-call, direct-call, or indirect-call handling.
void updateStateOnRet(const RetPE *retPE)
void updateStateOnCopy(const CopyStmt *copy)
const AbstractState & getAbsState(const ICFGNode *node) const
void propagateObjVarAbsVal(const ObjVar *var, const ICFGNode *defSite)
Propagate an ObjVar's abstract value from defSite to all its use-sites.
std::deque< const FunObjVar * > collectProgEntryFuns()
Get all entry point functions (functions without callers)
void updateStateOnLoad(const LoadStmt *load)
void updateStateOnBinary(const BinaryOPStmt *binary)
bool isCmpBranchFeasible(const IntraCFGEdge *edge, AbstractState &as)
Returns true if the cmp-conditional branch is feasible; narrows as in-place.
Set< const ICFGNode * > allAnalyzedNodes
void updateAbsState(const ICFGNode *node, const AbstractState &state)
virtual void handleLoopOrRecursion(const ICFGCycleWTO *cycle, const CallICFGNode *caller=nullptr)
Handle a WTO cycle (loop or recursive function) using widening/narrowing iteration.
const SVFVar * getSVFVar(NodeID varId) const
Retrieve SVFVar given its ID; asserts if no such variable exists.
void updateStateOnCmp(const CmpStmt *cmp)
void storeValue(const ValVar *pointer, const AbstractValue &val, const ICFGNode *node)
Map< const ICFGNode *, AbstractState > & getTrace()
AbstractState & getAbstractState(const ICFGNode *node)
Retrieve the abstract state for a given ICFG node. Asserts if absent.
IntervalValue getGepElementIndex(const GepStmt *gep)
Compute the flattened element index for a GepStmt.
Set< const ICFGNode * > getUseSitesOfObjVar(const ObjVar *obj, const ICFGNode *node) const
Given an ObjVar and its use-site ICFGNode, find all downstream use-site ICFGNodes.
AddressValue getGepObjAddrs(const ValVar *pointer, IntervalValue offset)
Compute GEP object addresses for a pointer at a given element offset.
AbstractValue loadValue(const ValVar *pointer, const ICFGNode *node)
const VarToAbsValMap & getVarToVal() const
get var2val map
void store(u32_t addr, const AbstractValue &val)
void initObjVar(const ObjVar *objVar)
AbstractState narrowing(const AbstractState &other)
domain narrow with other, and return the narrowed domain
AbstractState widening(const AbstractState &other)
domain widen with other, and return the widened domain
IntervalValue & getInterval()
AddressValue & getAddrs()
bool isAddr() const
s64_t getIntNumeral() const
const FunObjVar * getFunction() const
Get function of this call node.
Definition CallGraph.h:191
bool hasIndCSCallees(const CallICFGNode *cs) const
Definition CallGraph.h:335
const FunctionSet & getIndCSCallees(const CallICFGNode *cs) const
Definition CallGraph.h:339
const CallICFGNode * getOpCallICFGNode(u32_t op_idx) const
Return the CallICFGNode of the i-th operand.
@ ICMP_SGT
signed greater than
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ ICMP_UGE
unsigned greater or equal
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ ICMP_ULE
unsigned less or equal
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ ICMP_NE
not equal
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
@ ICMP_ULT
unsigned less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ ICMP_SLT
signed less than
@ ICMP_UGT
unsigned greater than
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_SLE
signed less or equal
bool empty() const
Definition WorkList.h:161
bool isDeclaration() const
iterator begin()
Iterators.
const GEdgeSetTy & getInEdges() const
const SVFStmtList & getSVFStmts() const
Definition ICFGNode.h:115
ICFGEdge * getICFGEdge(const ICFGNode *src, const ICFGNode *dst, ICFGEdge::ICFGEdgeK kind)
Get a SVFG edge according to src and dst.
Definition ICFG.cpp:311
void updateCallGraph(CallGraph *callgraph)
update ICFG for indirect calls
Definition ICFG.cpp:427
FunEntryICFGNode * getFunEntryICFGNode(const FunObjVar *fun)
Add a function entry node.
Definition ICFG.cpp:242
GlobalICFGNode * getGlobalICFGNode() const
Definition ICFG.h:244
NodeID getBlkPtr() const
Definition IRGraph.h:255
void meet_with(const IntervalValue &other)
Return a intersected IntervalValue.
static BoundedInt minus_infinity()
Get minus infinity -inf.
bool isBottom() const
bool is_zero() const
Return true if the IntervalValue is [0, 0].
static BoundedInt plus_infinity()
Get plus infinity +inf.
static IntervalValue top()
Create the IntervalValue [-inf, +inf].
const BoundedInt & lb() const
Return the lower bound.
const ValVar * getLHSVar() const
const ValVar * getRHSVar() const
const ValVar * getRes() const
Result SVFVar.
const ValVar * getOpVar(u32_t pos) const
Operand SVFVars.
u32_t getOpVarNum() const
static const OptionMap< u32_t > HandleRecur
recursion handling mode, Default: TOP
Definition Options.h:246
static const OptionMap< u32_t > AESparsity
Definition Options.h:243
static const Option< u32_t > WidenDelay
Definition Options.h:244
static const Option< bool > PStat
Definition Options.h:116
bool inSameCallGraphSCC(const FunObjVar *fun1, const FunObjVar *fun2)
Return TRUE if this edge is inside a PTACallGraph SCC, i.e., src node and dst node are in the same SC...
bool isInRecursion(const FunObjVar *fun) const
const Map< const FunObjVar *, const ICFGWTO * > & getFuncToWTO() const
Accessors for WTO data.
Definition PreAnalysis.h:72
const Set< const ValVar * > getCycleValVars(const ICFGCycleWTO *cycle) const
Definition PreAnalysis.h:83
AndersenWaveDiff * getPointerAnalysis() const
Accessors for Andersen's results.
Definition PreAnalysis.h:56
void initWTO()
Build WTO for each function using call graph SCC.
CallGraph * getCallGraph() const
Definition PreAnalysis.h:60
const ValVar * getRHSVar() const
const ValVar * getLHSVar() const
const CallSiteToFunPtrMap & getIndirectCallsites() const
Add/get indirect callsites.
Definition SVFIR.h:451
const SVFVar * getSVFVar(NodeID id) const
ObjVar/GepObjVar/BaseObjVar.
Definition SVFIR.h:133
static SVFIR * getPAG(bool buildFromFile=false)
Singleton design here to make sure we only have one instance during any analysis.
Definition SVFIR.h:118
virtual void endClk()
Definition SVFStat.h:63
virtual void startClk()
Definition SVFStat.h:58
ICFGNode * getICFGNode() const
u32_t getByteSize() const
Definition SVFType.h:289
virtual const std::string & getName() const
Definition SVFValue.h:189
const ValVar * getRHSVar() const
const ValVar * getLHSVar() const
bool isExtCall(const FunObjVar *fun)
Definition SVFUtil.cpp:437
for isBitcode
Definition BasicTypes.h:70
u32_t NodeID
Definition GeneralType.h:56
signed short s16_t
Definition GeneralType.h:54
unsigned short u16_t
Definition GeneralType.h:53
WTONode< ICFG > ICFGSingletonWTO
Definition ICFGWTO.h:45
llvm::IRBuilder IRBuilder
Definition BasicTypes.h:76
signed s32_t
Definition GeneralType.h:48
unsigned u32_t
Definition GeneralType.h:47
signed long long s64_t
Definition GeneralType.h:50
WTOComponent< ICFG > ICFGWTOComp
Definition ICFGWTO.h:44