Static Value-Flow Analysis
Loading...
Searching...
No Matches
AbstractInterpretation.cpp
Go to the documentation of this file.
1//===- AbstractInterpretation.cpp -- Abstract Interpretation----------------------//
2//
3// SVF: Static Value-Flow Analysis
4//
5// Copyright (C) <2013-> <Yulei Sui>
6//
7
8// This program is free software: you can redistribute it and/or modify
9// it under the terms of the GNU Affero General Public License as published by
10// the Free Software Foundation, either version 3 of the License, or
11// (at your option) any later version.
12
13// This program is distributed in the hope that it will be useful,
14// but WITHOUT ANY WARRANTY; without even the implied warranty of
15// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16// GNU Affero General Public License for more details.
17
18// You should have received a copy of the GNU Affero General Public License
19// along with this program. If not, see <http://www.gnu.org/licenses/>.
20//
21//===----------------------------------------------------------------------===//
22
23
24//
25// Created on: Jan 10, 2024
26// Author: Xiao Cheng, Jiawei Wang
27//
28
30#include "AE/Svfexe/AbsExtAPI.h"
31#include "SVFIR/SVFIR.h"
32#include "Util/Options.h"
33#include "Util/WorkList.h"
34#include "Graphs/CallGraph.h"
35#include "WPA/Andersen.h"
36#include <cmath>
37#include <deque>
38
39using namespace SVF;
40using namespace SVFUtil;
41
42
44{
45 stat->startClk();
46 icfg = _icfg;
49
50 // Run Andersen's pointer analysis and build WTO
55
58
59 analyse();
61 stat->endClk();
63 if (Options::PStat())
65 for (auto& detector: detectors)
66 detector->reportBug();
67}
68
73
75{
76 if (abstractTrace.count(node) == 0)
77 {
78 assert(false && "No preAbsTrace for this node");
79 abort();
80 }
81 else
82 {
83 return abstractTrace[node];
84 }
85}
86
// True iff an abstract state has been recorded for `node` (i.e. the node was
// reached during analysis). Signature line missing from this listing — presumably
// AbstractInterpretation::hasAbstractState(const ICFGNode*); confirm against repo.
88{
89 return abstractTrace.count(node) != 0;
90}
91
// Fetches the abstract value of a value variable, looked up in the abstract
// state of the ICFG node where the variable is defined. Signature dropped by
// the scrape — body suggests getAbstractValue(const ValVar*); confirm.
93{
94 AbstractState& as = getAbstractState(var->getICFGNode());
95 return as[var->getId()];
96}
97
104
106{
107 if (const ValVar* valVar = SVFUtil::dyn_cast<ValVar>(var))
108 return getAbstractValue(valVar);
109 else if (const ObjVar* objVar = SVFUtil::dyn_cast<ObjVar>(var))
110 return getAbstractValue(node, objVar);
111 assert(false && "Unknown SVFVar kind");
112 abort();
113}
114
116{
117 AbstractState& as = getAbstractState(var->getICFGNode());
118 as[var->getId()] = val;
119}
120
127
129{
130 if (const ValVar* valVar = SVFUtil::dyn_cast<ValVar>(var))
132 else if (const ObjVar* objVar = SVFUtil::dyn_cast<ObjVar>(var))
134 else
135 assert(false && "Unknown SVFVar kind");
136}
137
139{
141 for (const ValVar* var : vars)
142 {
143 u32_t id = var->getId();
144 result[id] = as[id];
145 }
146}
147
149{
151 for (const ObjVar* var : vars)
152 {
154 result.store(addr, as.load(addr));
155 }
156}
157
159{
161 for (const SVFVar* var : vars)
162 {
163 if (const ValVar* valVar = SVFUtil::dyn_cast<ValVar>(var))
164 {
165 u32_t id = valVar->getId();
166 result[id] = as[id];
167 }
168 else if (const ObjVar* objVar = SVFUtil::dyn_cast<ObjVar>(var))
169 {
171 result.store(addr, as.load(addr));
172 }
173 }
174}
175
184
191
// collectProgEntryFuns (name visible at its call site later in this file):
// walks the call graph and gathers all defined functions with no incoming
// call edges, i.e. program entry points. "main" is pushed to the front of the
// deque so it is analyzed first; all other entries keep discovery order.
196{
197 std::deque<const FunObjVar*> entryFunctions;
198
199 for (auto it = callGraph->begin(); it != callGraph->end(); ++it)
200 {
201 const CallGraphNode* cgNode = it->second;
202 const FunObjVar* fun = cgNode->getFunction();
203
204 // Skip declarations
205 if (fun->isDeclaration())
206 continue;
207
208 // Entry points are functions without callers (no incoming edges)
209 if (cgNode->getInEdges().empty())
210 {
211 // If main exists, put it first for priority using deque's push_front
212 if (fun->getName() == "main")
213 {
214 entryFunctions.push_front(fun);
215 }
216 else
217 {
218 entryFunctions.push_back(fun);
219 }
220 }
221 }
222
223 return entryFunctions;
224}
225
226
229{
230 // Always use multi-entry analysis from all entry points
232}
233
238{
239 // Collect all entry point functions
240 std::deque<const FunObjVar*> entryFunctions = collectProgEntryFuns();
241
242 if (entryFunctions.empty())
243 {
244 assert(false && "No entry functions found for analysis");
245 return;
246 }
247 // handle Global ICFGNode of SVFModule
249 for (const FunObjVar* entryFun : entryFunctions)
250 {
253 }
254}
255
262{
263 const ICFGNode* node = icfg->getGlobalICFGNode();
266
267 // Global Node, we just need to handle addr, load, store, copy and gep
268 for (const SVFStmt *stmt: node->getSVFStmts())
269 {
271 }
272
273 // BlkPtr represents a pointer whose target is statically unknown (e.g., from
274 // int2ptr casts, external function returns, or unmodeled instructions like
275 // AtomicCmpXchg). It should be an address pointing to the BlackHole object
276 // (ID=2), NOT an interval top.
277 //
278 // History: this was originally set to IntervalValue::top() as a quick fix when
279 // the analysis crashed on programs containing uninitialized BlkPtr. However,
280 // BlkPtr is semantically a *pointer* (address domain), not a numeric value
281 // (interval domain). Setting it to interval top broke cross-domain consistency:
282 // the interval domain and address domain gave contradictory information for the
283 // same variable. The correct representation is an AddressValue containing the
284 // BlackHole virtual address, which means "points to unknown memory".
287}
288
294{
295 std::vector<AbstractState> workList;
296 for (auto& edge : node->getInEdges())
297 {
298 const ICFGNode* pred = edge->getSrcNode();
299 if (abstractTrace.find(pred) == abstractTrace.end())
300 continue;
301
302 if (const IntraCFGEdge* intraCfgEdge = SVFUtil::dyn_cast<IntraCFGEdge>(edge))
303 {
305 if (intraCfgEdge->getCondition())
306 {
308 workList.push_back(tmpState);
309 }
310 else
311 {
312 workList.push_back(tmpState);
313 }
314 }
315 else if (SVFUtil::isa<CallCFGEdge>(edge))
316 {
317 workList.push_back(abstractTrace[pred]);
318 }
319 else if (SVFUtil::isa<RetCFGEdge>(edge))
320 {
321 switch (Options::HandleRecur())
322 {
323 case TOP:
324 {
325 workList.push_back(abstractTrace[pred]);
326 break;
327 }
328 case WIDEN_ONLY:
329 case WIDEN_NARROW:
330 {
331 const RetICFGNode* returnSite = SVFUtil::dyn_cast<RetICFGNode>(node);
332 const CallICFGNode* callSite = returnSite->getCallICFGNode();
334 workList.push_back(abstractTrace[pred]);
335 break;
336 }
337 }
338 }
339 }
340
341 if (workList.empty())
342 return false;
343
344 auto it = workList.begin();
345 abstractTrace[node] = *it;
346 for (++it; it != workList.end(); ++it)
347 abstractTrace[node].joinWith(*it);
348
349 return true;
350}
351
352
355{
357 // get cmp stmt's op0, op1, and predicate
358 NodeID op0 = cmpStmt->getOpVarID(0);
359 NodeID op1 = cmpStmt->getOpVarID(1);
360 NodeID res_id = cmpStmt->getResID();
361 s32_t predicate = cmpStmt->getPredicate();
362 // if op0 or op1 is nullptr, no need to change value, just copy the state
364 {
365 as = new_es;
366 return true;
367 }
368 // if op0 or op1 is undefined, return;
369 // skip address compare
370 if (new_es.inVarToAddrsTable(op0) || new_es.inVarToAddrsTable(op1))
371 {
372 as = new_es;
373 return true;
374 }
375 const LoadStmt *load_op0 = nullptr;
376 const LoadStmt *load_op1 = nullptr;
377 // get '%1 = load i32 s', and load inst may not exist
378 const SVFVar* loadVar0 = getSVFVar(op0);
379 if (!loadVar0->getInEdges().empty())
380 {
381 SVFStmt *loadVar0InStmt = *loadVar0->getInEdges().begin();
382 if (const LoadStmt *loadStmt = SVFUtil::dyn_cast<LoadStmt>(loadVar0InStmt))
383 {
385 }
386 else if (const CopyStmt *copyStmt = SVFUtil::dyn_cast<CopyStmt>(loadVar0InStmt))
387 {
388 loadVar0 = getSVFVar(copyStmt->getRHSVarID());
389 if (!loadVar0->getInEdges().empty())
390 {
391 SVFStmt *loadVar0InStmt2 = *loadVar0->getInEdges().begin();
392 if (const LoadStmt *loadStmt = SVFUtil::dyn_cast<LoadStmt>(loadVar0InStmt2))
393 {
395 }
396 }
397 }
398 }
399
400 const SVFVar* loadVar1 = getSVFVar(op1);
401 if (!loadVar1->getInEdges().empty())
402 {
403 SVFStmt *loadVar1InStmt = *loadVar1->getInEdges().begin();
404 if (const LoadStmt *loadStmt = SVFUtil::dyn_cast<LoadStmt>(loadVar1InStmt))
405 {
407 }
408 else if (const CopyStmt *copyStmt = SVFUtil::dyn_cast<CopyStmt>(loadVar1InStmt))
409 {
410 loadVar1 = getSVFVar(copyStmt->getRHSVarID());
411 if (!loadVar1->getInEdges().empty())
412 {
413 SVFStmt *loadVar1InStmt2 = *loadVar1->getInEdges().begin();
414 if (const LoadStmt *loadStmt = SVFUtil::dyn_cast<LoadStmt>(loadVar1InStmt2))
415 {
417 }
418 }
419 }
420 }
421 // for const X const, we may get concrete resVal instantly
422 // for var X const, we may get [0,1] if the intersection of var and const is not empty set
423 IntervalValue resVal = new_es[res_id].getInterval();
425 // If Var X const generates bottom value, it means this branch path is not feasible.
426 if (resVal.isBottom())
427 {
428 return false;
429 }
430
431 bool b0 = new_es[op0].getInterval().is_numeral();
432 bool b1 = new_es[op1].getInterval().is_numeral();
433
434 // if const X var, we should reverse op0 and op1.
435 if (b0 && !b1)
436 {
437 std::swap(op0, op1);
438 std::swap(load_op0, load_op1);
439 predicate = _switch_lhsrhs_predicate[predicate];
440 }
441 else
442 {
443 // if var X var, we cannot preset the branch condition to infer the intervals of var0,var1
444 if (!b0 && !b1)
445 {
446 as = new_es;
447 return true;
448 }
449 // if const X const, we can instantly get the resVal
450 else if (b0 && b1)
451 {
452 as = new_es;
453 return true;
454 }
455 }
456 // if cmp is 'var X const == false', we should reverse predicate 'var X' const == true'
457 // X' is reverse predicate of X
458 if (succ == 0)
459 {
460 predicate = _reverse_predicate[predicate];
461 }
462 else {}
463 // change interval range according to the compare predicate
464 AddressValue addrs;
465 if(load_op0 && new_es.inVarToAddrsTable(load_op0->getRHSVarID()))
466 addrs = new_es[load_op0->getRHSVarID()].getAddrs();
467
468 IntervalValue &lhs = new_es[op0].getInterval(), &rhs = new_es[op1].getInterval();
469 switch (predicate)
470 {
474 {
475 // Var == Const, so [var.lb, var.ub].meet_with(const)
477 // if lhs is register value, we should also change its mem obj
478 for (const auto &addr: addrs)
479 {
480 NodeID objId = new_es.getIDFromAddr(addr);
481 if (new_es.inAddrToValTable(objId))
482 {
483 new_es.load(addr).meet_with(rhs);
484 }
485 }
486 break;
487 }
491 // Compliment set
492 break;
497 // Var > Const, so [var.lb, var.ub].meet_with([Const+1, +INF])
498 lhs.meet_with(IntervalValue(rhs.lb() + 1, IntervalValue::plus_infinity()));
499 // if lhs is register value, we should also change its mem obj
500 for (const auto &addr: addrs)
501 {
502 NodeID objId = new_es.getIDFromAddr(addr);
503 if (new_es.inAddrToValTable(objId))
504 {
505 new_es.load(addr).meet_with(
507 }
508 }
509 break;
514 {
515 // Var >= Const, so [var.lb, var.ub].meet_with([Const, +INF])
517 // if lhs is register value, we should also change its mem obj
518 for (const auto &addr: addrs)
519 {
520 NodeID objId = new_es.getIDFromAddr(addr);
521 if (new_es.inAddrToValTable(objId))
522 {
523 new_es.load(addr).meet_with(
525 }
526 }
527 break;
528 }
533 {
534 // Var < Const, so [var.lb, var.ub].meet_with([-INF, const.ub-1])
535 lhs.meet_with(IntervalValue(IntervalValue::minus_infinity(), rhs.ub() - 1));
536 // if lhs is register value, we should also change its mem obj
537 for (const auto &addr: addrs)
538 {
539 NodeID objId = new_es.getIDFromAddr(addr);
540 if (new_es.inAddrToValTable(objId))
541 {
542 new_es.load(addr).meet_with(
544 }
545 }
546 break;
547 }
552 {
553 // Var <= Const, so [var.lb, var.ub].meet_with([-INF, const.ub])
555 // if lhs is register value, we should also change its mem obj
556 for (const auto &addr: addrs)
557 {
558 NodeID objId = new_es.getIDFromAddr(addr);
559 if (new_es.inAddrToValTable(objId))
560 {
561 new_es.load(addr).meet_with(
563 }
564 }
565 break;
566 }
568 break;
570 break;
571 default:
572 assert(false && "implement this part");
573 abort();
574 }
575 as = new_es;
576 return true;
577}
578
581{
583 IntervalValue& switch_cond = new_es[var->getId()].getInterval();
584 s64_t value = succ;
586 for (SVFStmt *cmpVarInStmt: var->getInEdges())
587 {
589 }
590 switch_cond.meet_with(IntervalValue(value, value));
591 if (switch_cond.isBottom())
592 {
593 return false;
594 }
595 while(!workList.empty())
596 {
597 const SVFStmt* stmt = workList.pop();
598 if (SVFUtil::isa<CopyStmt>(stmt))
599 {
600 IntervalValue& copy_cond = new_es[var->getId()].getInterval();
601 copy_cond.meet_with(IntervalValue(value, value));
602 }
603 else if (const LoadStmt* load = SVFUtil::dyn_cast<LoadStmt>(stmt))
604 {
605 if (new_es.inVarToAddrsTable(load->getRHSVarID()))
606 {
607 AddressValue &addrs = new_es[load->getRHSVarID()].getAddrs();
608 for (const auto &addr: addrs)
609 {
610 NodeID objId = new_es.getIDFromAddr(addr);
611 if (new_es.inAddrToValTable(objId))
612 {
614 }
615 }
616 }
617 }
618 }
619 as = new_es;
620 return true;
621}
622
625{
626 const SVFVar *cmpVar = intraEdge->getCondition();
627 if (cmpVar->getInEdges().empty())
628 {
630 intraEdge->getSuccessorCondValue(), as);
631 }
632 else
633 {
634 assert(!cmpVar->getInEdges().empty() &&
635 "no in edges?");
636 SVFStmt *cmpVarInStmt = *cmpVar->getInEdges().begin();
637 if (const CmpStmt *cmpStmt = SVFUtil::dyn_cast<CmpStmt>(cmpVarInStmt))
638 {
640 intraEdge->getSuccessorCondValue(), as);
641 }
642 else
643 {
645 cmpVar, intraEdge->getSuccessorCondValue(), as);
646 }
647 }
648 return true;
649}
657{
658 // Check reachability: pre-state must have been propagated by predecessors
659 bool isFunEntry = SVFUtil::isa<FunEntryICFGNode>(node);
660 if (!hasAbstractState(node))
661 {
662 if (isFunEntry)
663 {
664 // Entry point with no callers: inherit from global node
668 else
670 }
671 else
672 {
673 return false; // unreachable node
674 }
675 }
676
677 // Store the previous state for fixpoint detection
679
680 stat->getBlockTrace()++;
682
683 // Handle SVF statements
684 for (const SVFStmt *stmt: node->getSVFStmts())
685 {
687 }
688
689 // Handle call sites
690 if (const CallICFGNode* callNode = SVFUtil::dyn_cast<CallICFGNode>(node))
691 {
693 }
694
695 // Run detectors
696 for (auto& detector: detectors)
697 detector->detect(getAbstractState(node), node);
699
700 // Track this node as analyzed (for coverage statistics across all entry points)
701 allAnalyzedNodes.insert(node);
702
703 if (abstractTrace[node] == prevState)
704 return false;
705
706 return true;
707}
708
716{
717 auto it = preAnalysis->getFuncToWTO().find(funEntry->getFun());
718 if (it == preAnalysis->getFuncToWTO().end())
719 return;
720
721 // Push all top-level WTO components into the worklist in WTO order
722 FIFOWorkList<const ICFGWTOComp*> worklist(it->second->getWTOComponents());
723
724 while (!worklist.empty())
725 {
726 const ICFGWTOComp* comp = worklist.pop();
727
728 if (const ICFGSingletonWTO* singleton = SVFUtil::dyn_cast<ICFGSingletonWTO>(comp))
729 {
730 const ICFGNode* node = singleton->getICFGNode();
732 handleICFGNode(node);
733 }
734 else if (const ICFGCycleWTO* cycle = SVFUtil::dyn_cast<ICFGCycleWTO>(comp))
735 {
736 if (mergeStatesFromPredecessors(cycle->head()->getICFGNode()))
738 }
739 }
740}
741
742
744{
745 if (const CallICFGNode* callNode = SVFUtil::dyn_cast<CallICFGNode>(node))
746 {
747 if (isExtCall(callNode))
748 {
750 }
751 else
752 {
753 // Handle both direct and indirect calls uniformly
755 }
756 }
757 else
758 assert (false && "it is not call node");
759}
760
// isExtCall (name visible at its call site earlier in this file): returns
// whether the callee of this call node is an external (unmodeled) function,
// delegating entirely to SVFUtil::isExtCall.
762{
763 return SVFUtil::isExtCall(callNode->getCalledFunction());
764}
765
767{
769 for (auto& detector : detectors)
770 {
771 detector->handleStubFunctions(callNode);
772 }
773}
774
780
783{
786 const RetICFGNode *retNode = callNode->getRetICFGNode();
787 if (retNode->getSVFStmts().size() > 0)
788 {
789 if (const RetPE *retPE = SVFUtil::dyn_cast<RetPE>(*retNode->getSVFStmts().begin()))
790 {
791 if (!retPE->getLHSVar()->isPointer() &&
792 !retPE->getLHSVar()->isConstDataOrAggDataButNotNullPtr())
793 {
794 as[retPE->getLHSVarID()] = IntervalValue::top();
795 }
796 }
797 }
799}
800
808
811{
812 // Direct call: get callee directly from call node
813 if (const FunObjVar* callee = callNode->getCalledFunction())
814 return callee;
815
816 // Indirect call: resolve callee through pointer analysis
818 auto it = callsiteMaps.find(callNode);
819 if (it == callsiteMaps.end())
820 return nullptr;
821
822 NodeID call_id = it->second;
824 return nullptr;
825
827 if (!as.inVarToAddrsTable(call_id))
828 return nullptr;
829
831 if (Addrs.getAddrs().empty())
832 return nullptr;
833
835 const SVFVar* func_var = getSVFVar(as.getIDFromAddr(addr));
836 return SVFUtil::dyn_cast<FunObjVar>(func_var);
837}
838
841{
843 if (!callee)
844 return false;
845
846 // Non-recursive function: never skip, always inline
848 return false;
849
850 // For recursive functions, skip only recursive callsites (within same SCC).
851 // Entry calls (from outside SCC) are not skipped - they are inlined so that
852 // handleLoopOrRecursion() can analyze the function body.
853 // This applies uniformly to all modes (TOP/WIDEN_ONLY/WIDEN_NARROW).
855}
856
// shouldApplyNarrowing (name visible at its call site in the cycle handler):
// decides whether the narrowing phase runs after widening reaches a fixpoint.
// Regular (non-recursive) loops always narrow; for recursive functions the
// decision follows the Options::HandleRecur() mode.
859{
860 // Non-recursive functions (regular loops): always apply narrowing
861 if (!isRecursiveFun(fun))
862 return true;
863
864 // Recursive functions: WIDEN_NARROW applies narrowing, WIDEN_ONLY does not
865 // TOP mode exits early in handleLoopOrRecursion, so should not reach here
866 switch (Options::HandleRecur())
867 {
868 case TOP:
869 assert(false && "TOP mode should not reach narrowing phase for recursive functions");
870 return false;
871 case WIDEN_ONLY:
872 return false; // Skip narrowing for recursive functions
873 case WIDEN_NARROW:
874 return true; // Apply narrowing for recursive functions
875 default:
876 assert(false && "Unknown recursion handling mode");
877 return false;
878 }
879}
890{
894 return;
895
896 // Direct call: callee is known
897 if (const FunObjVar* callee = callNode->getCalledFunction())
898 {
901 // Resume return node from caller's state (context-insensitive).
902 // Callee's side effects are reflected through shared abstractTrace.
903 const RetICFGNode* retNode = callNode->getRetICFGNode();
905 return;
906 }
907
908 // Indirect call: use Andersen's call graph to get all resolved callees.
909 const RetICFGNode* retNode = callNode->getRetICFGNode();
911 {
913 for (const FunObjVar* callee : callees)
914 {
915 if (callee->isDeclaration())
916 continue;
919 }
920 }
921 // Resume return node from caller's state (context-insensitive)
923}
924
966{
967 const ICFGNode* cycle_head = cycle->head()->getICFGNode();
968
969 // TOP mode for recursive function cycles: set all stores and return value to TOP
970 if (Options::HandleRecur() == TOP && isRecursiveFun(cycle_head->getFun()))
971 {
972 if (caller)
974 return;
975 }
976
977 // Iterate until fixpoint with widening/narrowing on the cycle head.
978 bool increasing = true;
980 for (u32_t cur_iter = 0;; cur_iter++)
981 {
982 if (cur_iter >= widen_delay)
983 {
984 // Save state before processing head
986
987 // Process cycle head: merge from predecessors, then execute statements
988 // (uses same gated pattern as handleWTOComponent in origin/master)
992
993 if (increasing)
994 {
997 {
998 // Widening fixpoint reached; switch to narrowing phase.
999 increasing = false;
1000 continue;
1001 }
1002 }
1003 else
1004 {
1005 if (!shouldApplyNarrowing(cycle_head->getFun()))
1006 break;
1009 break;
1010 }
1011 }
1012 else
1013 {
1014 // Before widen_delay: process cycle head with gated pattern
1017 }
1018
1019 // Process cycle body components (each with gated merge+handle)
1020 for (const ICFGWTOComp* comp : cycle->getWTOComponents())
1021 {
1022 if (const ICFGSingletonWTO* singleton = SVFUtil::dyn_cast<ICFGSingletonWTO>(comp))
1023 {
1024 const ICFGNode* node = singleton->getICFGNode();
1026 handleICFGNode(node);
1027 }
1028 else if (const ICFGCycleWTO* subCycle = SVFUtil::dyn_cast<ICFGCycleWTO>(comp))
1029 {
1030 if (mergeStatesFromPredecessors(subCycle->head()->getICFGNode()))
1032 }
1033 }
1034 }
1035}
1036
1038{
1039 if (const AddrStmt *addr = SVFUtil::dyn_cast<AddrStmt>(stmt))
1040 {
1042 }
1043 else if (const BinaryOPStmt *binary = SVFUtil::dyn_cast<BinaryOPStmt>(stmt))
1044 {
1046 }
1047 else if (const CmpStmt *cmp = SVFUtil::dyn_cast<CmpStmt>(stmt))
1048 {
1050 }
1051 else if (SVFUtil::isa<UnaryOPStmt>(stmt))
1052 {
1053 }
1054 else if (SVFUtil::isa<BranchStmt>(stmt))
1055 {
1056 // branch stmt is handled in hasBranchES
1057 }
1058 else if (const LoadStmt *load = SVFUtil::dyn_cast<LoadStmt>(stmt))
1059 {
1060 updateStateOnLoad(load);
1061 }
1062 else if (const StoreStmt *store = SVFUtil::dyn_cast<StoreStmt>(stmt))
1063 {
1064 updateStateOnStore(store);
1065 }
1066 else if (const CopyStmt *copy = SVFUtil::dyn_cast<CopyStmt>(stmt))
1067 {
1069 }
1070 else if (const GepStmt *gep = SVFUtil::dyn_cast<GepStmt>(stmt))
1071 {
1073 }
1074 else if (const SelectStmt *select = SVFUtil::dyn_cast<SelectStmt>(stmt))
1075 {
1077 }
1078 else if (const PhiStmt *phi = SVFUtil::dyn_cast<PhiStmt>(stmt))
1079 {
1081 }
1082 else if (const CallPE *callPE = SVFUtil::dyn_cast<CallPE>(stmt))
1083 {
1084 // To handle Call Edge
1086 }
1087 else if (const RetPE *retPE = SVFUtil::dyn_cast<RetPE>(stmt))
1088 {
1089 updateStateOnRet(retPE);
1090 }
1091 else
1092 assert(false && "implement this part");
1093 // NullPtr is index 0, it should not be changed
1094 assert(!getAbstractState(stmt->getICFGNode())[IRGraph::NullPtr].isInterval() &&
1095 !getAbstractState(stmt->getICFGNode())[IRGraph::NullPtr].isAddr());
1096}
1097
1100{
1102 const RetICFGNode *retNode = callNode->getRetICFGNode();
1103 if (retNode->getSVFStmts().size() > 0)
1104 {
1105 if (const RetPE *retPE = SVFUtil::dyn_cast<RetPE>(*retNode->getSVFStmts().begin()))
1106 {
1108 if (!retPE->getLHSVar()->isPointer() && !retPE->getLHSVar()->isConstDataOrAggDataButNotNullPtr())
1109 as[retPE->getLHSVarID()] = IntervalValue::top();
1110 }
1111 }
1112 if (!retNode->getOutEdges().empty())
1113 {
1114 if (retNode->getOutEdges().size() == 1)
1115 {
1116
1117 }
1118 else
1119 {
1120 return;
1121 }
1122 }
1125 for (const SVFBasicBlock * bb: callNode->getCalledFunction()->getReachableBBs())
1126 {
1127 for (const ICFGNode* node: bb->getICFGNodeList())
1128 {
1129 for (const SVFStmt *stmt: node->getSVFStmts())
1130 {
1131 if (const StoreStmt *store = SVFUtil::dyn_cast<StoreStmt>(stmt))
1132 {
1133 const SVFVar *rhsVar = store->getRHSVar();
1134 u32_t lhs = store->getLHSVarID();
1135 if (as.inVarToAddrsTable(lhs))
1136 {
1137 if (!rhsVar->isPointer() && !rhsVar->isConstDataOrAggDataButNotNullPtr())
1138 {
1139 const AbstractValue &addrs = as[lhs];
1140 for (const auto &addr: addrs.getAddrs())
1141 {
1142 as.store(addr, IntervalValue::top());
1143 }
1144 }
1145 }
1146 }
1147 }
1148 }
1149 }
1150}
1151
1153{
1154 AbstractState& as = getAbstractState(gep->getICFGNode());
1155 u32_t rhs = gep->getRHSVarID();
1156 u32_t lhs = gep->getLHSVarID();
1157 IntervalValue offsetPair = as.getElementIndex(gep);
1159 APOffset lb = offsetPair.lb().getIntNumeral() < Options::MaxFieldLimit()?
1160 offsetPair.lb().getIntNumeral(): Options::MaxFieldLimit();
1161 APOffset ub = offsetPair.ub().getIntNumeral() < Options::MaxFieldLimit()?
1162 offsetPair.ub().getIntNumeral(): Options::MaxFieldLimit();
1163 for (APOffset i = lb; i <= ub; i++)
1164 gepAddrs.join_with(as.getGepObjAddrs(rhs,IntervalValue(i)));
1165 as[lhs] = gepAddrs;
1166}
1167
// Abstract transfer function for a select statement (cond ? tval : fval).
// Signature dropped by the scrape — presumably updateStateOnSelect(const SelectStmt*).
// If the condition's interval is a single concrete numeral, the result takes
// exactly one branch's value (zero selects the false value); otherwise the
// result is the join (over-approximation) of both branch values.
1169{
1170 AbstractState& as = getAbstractState(select->getICFGNode());
1171 u32_t res = select->getResID();
1172 u32_t tval = select->getTrueValue()->getId();
1173 u32_t fval = select->getFalseValue()->getId();
1174 u32_t cond = select->getCondition()->getId();
1175 if (as[cond].getInterval().is_numeral())
1176 {
1177 as[res] = as[cond].getInterval().is_zero() ? as[fval] : as[tval];
1178 }
1179 else
1180 {
// Condition unknown: soundly merge both possible outcomes.
1181 as[res] = as[tval];
1182 as[res].join_with(as[fval]);
1183 }
1184}
1185
1187{
1188 const ICFGNode* icfgNode = phi->getICFGNode();
1189 AbstractState& as = getAbstractState(icfgNode);
1190 u32_t res = phi->getResID();
1192 for (u32_t i = 0; i < phi->getOpVarNum(); i++)
1193 {
1194 NodeID curId = phi->getOpVarID(i);
1195 const ICFGNode* opICFGNode = phi->getOpICFGNode(i);
1197 {
1201 // if IntraEdge, check the condition, if it is feasible, join the value
1202 // if IntraEdge but not conditional edge, join the value
1203 // if not IntraEdge, join the value
1204 if (edge)
1205 {
1206 const IntraCFGEdge* intraEdge = SVFUtil::cast<IntraCFGEdge>(edge);
1207 if (intraEdge->getCondition())
1208 {
1210 rhs.join_with(opAs[curId]);
1211 }
1212 else
1213 rhs.join_with(opAs[curId]);
1214 }
1215 else
1216 {
1217 rhs.join_with(opAs[curId]);
1218 }
1219 }
1220 }
1221 as[res] = rhs;
1222}
1223
1224
// Abstract transfer for a call parameter edge: copies the actual argument's
// abstract value (rhs) into the formal parameter (lhs) within the state at
// the edge's ICFG node. Signature dropped by the scrape — confirm it is
// updateStateOnCallPE(const CallPE*).
1226{
1227 AbstractState& as = getAbstractState(callPE->getICFGNode());
1228 NodeID lhs = callPE->getLHSVarID();
1229 NodeID rhs = callPE->getRHSVarID();
1230 as[lhs] = as[rhs];
1231}
1232
1234{
1236 NodeID lhs = retPE->getLHSVarID();
1237 NodeID rhs = retPE->getRHSVarID();
1238 as[lhs] = as[rhs];
1239}
1240
1241
// Abstract transfer for an address-of statement: initializes the abstract
// object for the rhs object variable, clamps integer-typed objects to the
// value range implied by their type, then assigns the object's value to the
// lhs pointer. Signature dropped by the scrape — presumably
// updateStateOnAddr(const AddrStmt*).
1243{
1244 AbstractState& as = getAbstractState(addr->getICFGNode());
1245 as.initObjVar(SVFUtil::cast<ObjVar>(addr->getRHSVar()));
// Only integer-typed objects get a type-derived range restriction.
1246 if (addr->getRHSVar()->getType()->getKind() == SVFType::SVFIntegerTy)
1247 as[addr->getRHSVarID()].getInterval().meet_with(utils->getRangeLimitFromType(addr->getRHSVar()->getType()));
1248 as[addr->getLHSVarID()] = as[addr->getRHSVarID()];
1249}
1250
1251
1253{
1257 const ICFGNode* node = binary->getICFGNode();
1259 u32_t op0 = binary->getOpVarID(0);
1260 u32_t op1 = binary->getOpVarID(1);
1261 u32_t res = binary->getResID();
1262 if (!as.inVarToValTable(op0)) as[op0] = IntervalValue::top();
1263 if (!as.inVarToValTable(op1)) as[op1] = IntervalValue::top();
1264 IntervalValue &lhs = as[op0].getInterval(), &rhs = as[op1].getInterval();
1266 switch (binary->getOpcode())
1267 {
1268 case BinaryOPStmt::Add:
1269 case BinaryOPStmt::FAdd:
1270 resVal = (lhs + rhs);
1271 break;
1272 case BinaryOPStmt::Sub:
1273 case BinaryOPStmt::FSub:
1274 resVal = (lhs - rhs);
1275 break;
1276 case BinaryOPStmt::Mul:
1277 case BinaryOPStmt::FMul:
1278 resVal = (lhs * rhs);
1279 break;
1280 case BinaryOPStmt::SDiv:
1281 case BinaryOPStmt::FDiv:
1282 case BinaryOPStmt::UDiv:
1283 resVal = (lhs / rhs);
1284 break;
1285 case BinaryOPStmt::SRem:
1286 case BinaryOPStmt::FRem:
1287 case BinaryOPStmt::URem:
1288 resVal = (lhs % rhs);
1289 break;
1290 case BinaryOPStmt::Xor:
1291 resVal = (lhs ^ rhs);
1292 break;
1293 case BinaryOPStmt::And:
1294 resVal = (lhs & rhs);
1295 break;
1296 case BinaryOPStmt::Or:
1297 resVal = (lhs | rhs);
1298 break;
1299 case BinaryOPStmt::AShr:
1300 resVal = (lhs >> rhs);
1301 break;
1302 case BinaryOPStmt::Shl:
1303 resVal = (lhs << rhs);
1304 break;
1305 case BinaryOPStmt::LShr:
1306 resVal = (lhs >> rhs);
1307 break;
1308 default:
1309 assert(false && "undefined binary: ");
1310 }
1311 as[res] = resVal;
1312}
1313
1315{
1316 AbstractState& as = getAbstractState(cmp->getICFGNode());
1317 u32_t op0 = cmp->getOpVarID(0);
1318 u32_t op1 = cmp->getOpVarID(1);
1319 // if it is address
1320 if (as.inVarToAddrsTable(op0) && as.inVarToAddrsTable(op1))
1321 {
1323 AddressValue addrOp0 = as[op0].getAddrs();
1324 AddressValue addrOp1 = as[op1].getAddrs();
1325 u32_t res = cmp->getResID();
1326 if (addrOp0.equals(addrOp1))
1327 {
1328 resVal = IntervalValue(1, 1);
1329 }
1330 else if (addrOp0.hasIntersect(addrOp1))
1331 {
1332 resVal = IntervalValue(0, 1);
1333 }
1334 else
1335 {
1336 resVal = IntervalValue(0, 0);
1337 }
1338 as[res] = resVal;
1339 }
1340 // if op0 or op1 is nullptr, compare abstractValue instead of touching addr or interval
1341 else if (op0 == IRGraph::NullPtr || op1 == IRGraph::NullPtr)
1342 {
1343 u32_t res = cmp->getResID();
1344 IntervalValue resVal = (as[op0].equals(as[op1])) ? IntervalValue(1, 1) : IntervalValue(0, 0);
1345 as[res] = resVal;
1346 }
1347 else
1348 {
1349 if (!as.inVarToValTable(op0))
1351 if (!as.inVarToValTable(op1))
1353 u32_t res = cmp->getResID();
1354 if (as.inVarToValTable(op0) && as.inVarToValTable(op1))
1355 {
1357 if (as[op0].isInterval() && as[op1].isInterval())
1358 {
1359 IntervalValue &lhs = as[op0].getInterval(),
1360 &rhs = as[op1].getInterval();
1361 // AbstractValue
1362 auto predicate = cmp->getPredicate();
1363 switch (predicate)
1364 {
1365 case CmpStmt::ICMP_EQ:
1366 case CmpStmt::FCMP_OEQ:
1367 case CmpStmt::FCMP_UEQ:
1368 resVal = (lhs == rhs);
1369 // resVal = (lhs.getInterval() == rhs.getInterval());
1370 break;
1371 case CmpStmt::ICMP_NE:
1372 case CmpStmt::FCMP_ONE:
1373 case CmpStmt::FCMP_UNE:
1374 resVal = (lhs != rhs);
1375 break;
1376 case CmpStmt::ICMP_UGT:
1377 case CmpStmt::ICMP_SGT:
1378 case CmpStmt::FCMP_OGT:
1379 case CmpStmt::FCMP_UGT:
1380 resVal = (lhs > rhs);
1381 break;
1382 case CmpStmt::ICMP_UGE:
1383 case CmpStmt::ICMP_SGE:
1384 case CmpStmt::FCMP_OGE:
1385 case CmpStmt::FCMP_UGE:
1386 resVal = (lhs >= rhs);
1387 break;
1388 case CmpStmt::ICMP_ULT:
1389 case CmpStmt::ICMP_SLT:
1390 case CmpStmt::FCMP_OLT:
1391 case CmpStmt::FCMP_ULT:
1392 resVal = (lhs < rhs);
1393 break;
1394 case CmpStmt::ICMP_ULE:
1395 case CmpStmt::ICMP_SLE:
1396 case CmpStmt::FCMP_OLE:
1397 case CmpStmt::FCMP_ULE:
1398 resVal = (lhs <= rhs);
1399 break;
1401 resVal = IntervalValue(0, 0);
1402 break;
1403 case CmpStmt::FCMP_TRUE:
1404 resVal = IntervalValue(1, 1);
1405 break;
1406 case CmpStmt::FCMP_ORD:
1407 case CmpStmt::FCMP_UNO:
1408 // FCMP_ORD: true if both operands are not NaN
1409 // FCMP_UNO: true if either operand is NaN
1410 // Conservatively return [0, 1] since we don't track NaN
1411 resVal = IntervalValue(0, 1);
1412 break;
1413 default:
1414 assert(false && "undefined compare: ");
1415 }
1416 as[res] = resVal;
1417 }
1418 else if (as[op0].isAddr() && as[op1].isAddr())
1419 {
1420 AddressValue &lhs = as[op0].getAddrs(),
1421 &rhs = as[op1].getAddrs();
1422 auto predicate = cmp->getPredicate();
1423 switch (predicate)
1424 {
1425 case CmpStmt::ICMP_EQ:
1426 case CmpStmt::FCMP_OEQ:
1427 case CmpStmt::FCMP_UEQ:
1428 {
1429 if (lhs.hasIntersect(rhs))
1430 {
1431 resVal = IntervalValue(0, 1);
1432 }
1433 else if (lhs.empty() && rhs.empty())
1434 {
1435 resVal = IntervalValue(1, 1);
1436 }
1437 else
1438 {
1439 resVal = IntervalValue(0, 0);
1440 }
1441 break;
1442 }
1443 case CmpStmt::ICMP_NE:
1444 case CmpStmt::FCMP_ONE:
1445 case CmpStmt::FCMP_UNE:
1446 {
1447 if (lhs.hasIntersect(rhs))
1448 {
1449 resVal = IntervalValue(0, 1);
1450 }
1451 else if (lhs.empty() && rhs.empty())
1452 {
1453 resVal = IntervalValue(0, 0);
1454 }
1455 else
1456 {
1457 resVal = IntervalValue(1, 1);
1458 }
1459 break;
1460 }
1461 case CmpStmt::ICMP_UGT:
1462 case CmpStmt::ICMP_SGT:
1463 case CmpStmt::FCMP_OGT:
1464 case CmpStmt::FCMP_UGT:
1465 {
1466 if (lhs.size() == 1 && rhs.size() == 1)
1467 {
1468 resVal = IntervalValue(*lhs.begin() > *rhs.begin());
1469 }
1470 else
1471 {
1472 resVal = IntervalValue(0, 1);
1473 }
1474 break;
1475 }
1476 case CmpStmt::ICMP_UGE:
1477 case CmpStmt::ICMP_SGE:
1478 case CmpStmt::FCMP_OGE:
1479 case CmpStmt::FCMP_UGE:
1480 {
1481 if (lhs.size() == 1 && rhs.size() == 1)
1482 {
1483 resVal = IntervalValue(*lhs.begin() >= *rhs.begin());
1484 }
1485 else
1486 {
1487 resVal = IntervalValue(0, 1);
1488 }
1489 break;
1490 }
1491 case CmpStmt::ICMP_ULT:
1492 case CmpStmt::ICMP_SLT:
1493 case CmpStmt::FCMP_OLT:
1494 case CmpStmt::FCMP_ULT:
1495 {
1496 if (lhs.size() == 1 && rhs.size() == 1)
1497 {
1498 resVal = IntervalValue(*lhs.begin() < *rhs.begin());
1499 }
1500 else
1501 {
1502 resVal = IntervalValue(0, 1);
1503 }
1504 break;
1505 }
1506 case CmpStmt::ICMP_ULE:
1507 case CmpStmt::ICMP_SLE:
1508 case CmpStmt::FCMP_OLE:
1509 case CmpStmt::FCMP_ULE:
1510 {
1511 if (lhs.size() == 1 && rhs.size() == 1)
1512 {
1513 resVal = IntervalValue(*lhs.begin() <= *rhs.begin());
1514 }
1515 else
1516 {
1517 resVal = IntervalValue(0, 1);
1518 }
1519 break;
1520 }
1522 resVal = IntervalValue(0, 0);
1523 break;
1524 case CmpStmt::FCMP_TRUE:
1525 resVal = IntervalValue(1, 1);
1526 break;
1527 case CmpStmt::FCMP_ORD:
1528 case CmpStmt::FCMP_UNO:
1529 // FCMP_ORD: true if both operands are not NaN
1530 // FCMP_UNO: true if either operand is NaN
1531 // Conservatively return [0, 1] since we don't track NaN
1532 resVal = IntervalValue(0, 1);
1533 break;
1534 default:
1535 assert(false && "undefined compare: ");
1536 }
1537 as[res] = resVal;
1538 }
1539 }
1540 }
1541}
1542
1544{
1546 u32_t rhs = load->getRHSVarID();
1547 u32_t lhs = load->getLHSVarID();
1548 as[lhs] = as.loadValue(rhs);
1549}
1550
1552{
1554 u32_t rhs = store->getRHSVarID();
1555 u32_t lhs = store->getLHSVarID();
1556 as.storeValue(lhs, as[rhs]);
1557}
1558
1560{
1561 auto getZExtValue = [](AbstractState& as, const SVFVar* var)
1562 {
1563 const SVFType* type = var->getType();
1564 if (SVFUtil::isa<SVFIntegerType>(type))
1565 {
1566 u32_t bits = type->getByteSize() * 8;
1567 if (as[var->getId()].getInterval().is_numeral())
1568 {
1569 if (bits == 8)
1570 {
1571 int8_t signed_i8_value = as[var->getId()].getInterval().getIntNumeral();
1574 }
1575 else if (bits == 16)
1576 {
1577 s16_t signed_i16_value = as[var->getId()].getInterval().getIntNumeral();
1580 }
1581 else if (bits == 32)
1582 {
1583 s32_t signed_i32_value = as[var->getId()].getInterval().getIntNumeral();
1586 }
1587 else if (bits == 64)
1588 {
1589 s64_t signed_i64_value = as[var->getId()].getInterval().getIntNumeral();
1591 // we only support i64 at most
1592 }
1593 else
1594 assert(false && "cannot support int type other than u8/16/32/64");
1595 }
1596 else
1597 {
1598 return IntervalValue::top(); // TODO: may have better solution
1599 }
1600 }
1601 return IntervalValue::top(); // TODO: may have better solution
1602 };
1603
1604 auto getTruncValue = [&](const AbstractState& as, const SVFVar* var,
1605 const SVFType* dstType)
1606 {
1607 const IntervalValue& itv = as[var->getId()].getInterval();
1608 if(itv.isBottom()) return itv;
1609 // get the value of ub and lb
1611 s64_t int_ub = itv.ub().getIntNumeral();
1612 // get dst type
1613 u32_t dst_bits = dstType->getByteSize() * 8;
1614 if (dst_bits == 8)
1615 {
1616 // get the signed value of ub and lb
1617 int8_t s8_lb = static_cast<int8_t>(int_lb);
1618 int8_t s8_ub = static_cast<int8_t>(int_ub);
1619 if (s8_lb > s8_ub)
1620 {
1621 // return range of s8
1623 }
1624 return IntervalValue(s8_lb, s8_ub);
1625 }
1626 else if (dst_bits == 16)
1627 {
1628 // get the signed value of ub and lb
1629 s16_t s16_lb = static_cast<s16_t>(int_lb);
1630 s16_t s16_ub = static_cast<s16_t>(int_ub);
1631 if (s16_lb > s16_ub)
1632 {
1633 // return range of s16
1635 }
1636 return IntervalValue(s16_lb, s16_ub);
1637 }
1638 else if (dst_bits == 32)
1639 {
1640 // get the signed value of ub and lb
1641 s32_t s32_lb = static_cast<s32_t>(int_lb);
1642 s32_t s32_ub = static_cast<s32_t>(int_ub);
1643 if (s32_lb > s32_ub)
1644 {
1645 // return range of s32
1647 }
1648 return IntervalValue(s32_lb, s32_ub);
1649 }
1650 else
1651 {
1652 assert(false && "cannot support dst int type other than u8/16/32");
1653 abort();
1654 }
1655 };
1656
1657 AbstractState& as = getAbstractState(copy->getICFGNode());
1658 u32_t lhs = copy->getLHSVarID();
1659 u32_t rhs = copy->getRHSVarID();
1660
1661 if (copy->getCopyKind() == CopyStmt::COPYVAL)
1662 {
1663 as[lhs] = as[rhs];
1664 }
1665 else if (copy->getCopyKind() == CopyStmt::ZEXT)
1666 {
1667 as[lhs] = getZExtValue(as, copy->getRHSVar());
1668 }
1669 else if (copy->getCopyKind() == CopyStmt::SEXT)
1670 {
1671 as[lhs] = as[rhs].getInterval();
1672 }
1673 else if (copy->getCopyKind() == CopyStmt::FPTOSI)
1674 {
1675 as[lhs] = as[rhs].getInterval();
1676 }
1677 else if (copy->getCopyKind() == CopyStmt::FPTOUI)
1678 {
1679 as[lhs] = as[rhs].getInterval();
1680 }
1681 else if (copy->getCopyKind() == CopyStmt::SITOFP)
1682 {
1683 as[lhs] = as[rhs].getInterval();
1684 }
1685 else if (copy->getCopyKind() == CopyStmt::UITOFP)
1686 {
1687 as[lhs] = as[rhs].getInterval();
1688 }
1689 else if (copy->getCopyKind() == CopyStmt::TRUNC)
1690 {
1691 as[lhs] = getTruncValue(as, copy->getRHSVar(), copy->getLHSVar()->getType());
1692 }
1693 else if (copy->getCopyKind() == CopyStmt::FPTRUNC)
1694 {
1695 as[lhs] = as[rhs].getInterval();
1696 }
1697 else if (copy->getCopyKind() == CopyStmt::INTTOPTR)
1698 {
1699 //insert nullptr
1700 }
1701 else if (copy->getCopyKind() == CopyStmt::PTRTOINT)
1702 {
1704 }
1705 else if (copy->getCopyKind() == CopyStmt::BITCAST)
1706 {
1707 if (as[rhs].isAddr())
1708 {
1709 as[lhs] = as[rhs];
1710 }
1711 else
1712 {
1713 // do nothing
1714 }
1715 }
1716 else
1717 assert(false && "undefined copy kind");
1718}
1719
1720
1721
#define BlackHoleObjAddr
newitem type
Definition cJSON.cpp:2739
copy
Definition cJSON.cpp:414
void finializeStat()
Definition AEStat.cpp:43
u32_t & getBlockTrace()
Definition AEStat.h:70
void performStat() override
Definition AEStat.cpp:120
u32_t & getICFGNodeTrace()
Definition AEStat.h:78
void countStateSize()
Definition AEStat.cpp:31
Handles external API calls and manages abstract states.
Definition AbsExtAPI.h:44
void collectCheckPoint()
void handleExtAPI(const CallICFGNode *call)
Handles an external API call.
void checkPointAllSet()
IntervalValue getRangeLimitFromType(const SVFType *type)
Gets the range limit from a type.
void updateStateOnCall(const CallPE *callPE)
const FunObjVar * getCallee(const CallICFGNode *callNode)
Get callee function: directly for direct calls, via pointer analysis for indirect calls.
void updateStateOnStore(const StoreStmt *store)
virtual void handleFunCall(const CallICFGNode *callNode)
void analyzeFromAllProgEntries()
Analyze all entry points (functions without callers)
void updateStateOnGep(const GepStmt *gep)
virtual void handleGlobalNode()
Initialize abstract state for the global ICFG node and process global statements.
void handleFunction(const ICFGNode *funEntry, const CallICFGNode *caller=nullptr)
Handle a function body via worklist-driven WTO traversal starting from funEntry.
virtual bool isExtCall(const CallICFGNode *callNode)
void updateAbstractValue(const ValVar *var, const AbstractValue &val)
Set abstract value for a top-level variable at a given ICFG node.
AbstractState & getAbstractState(const ICFGNode *node)
Retrieve the abstract state from the trace for a given ICFG node; asserts if no trace exists.
void updateStateOnPhi(const PhiStmt *phi)
bool handleICFGNode(const ICFGNode *node)
Handle an ICFG node: execute statements; return true if state changed.
bool isBranchFeasible(const IntraCFGEdge *intraEdge, AbstractState &as)
Check if the branch on intraEdge is feasible under abstract state as.
std::vector< std::unique_ptr< AEDetector > > detectors
virtual void handleExtCall(const CallICFGNode *callNode)
bool isSwitchBranchFeasible(const SVFVar *var, s64_t succ, AbstractState &as)
Check if switch branch with case value succ is feasible; refine intervals in as accordingly.
bool mergeStatesFromPredecessors(const ICFGNode *node)
void updateStateOnSelect(const SelectStmt *select)
virtual void handleSVFStatement(const SVFStmt *stmt)
Dispatch an SVF statement (Addr/Binary/Cmp/Load/Store/Copy/Gep/Select/Phi/Call/Ret) to its handler.
bool skipRecursiveCall(const CallICFGNode *callNode)
Skip recursive callsites (within SCC); entry calls from outside SCC are not skipped.
bool hasAbstractState(const ICFGNode *node)
Check if an abstract state exists in the trace for a given ICFG node.
SVFIR * svfir
protected data members, also used in subclasses
virtual void runOnModule(ICFG *icfg)
virtual bool isRecursiveCallSite(const CallICFGNode *callNode, const FunObjVar *)
Check if caller and callee are in the same CallGraph SCC (i.e. a recursive callsite)
bool shouldApplyNarrowing(const FunObjVar *fun)
Check if narrowing should be applied: always for regular loops, mode-dependent for recursion.
virtual bool isRecursiveFun(const FunObjVar *fun)
Check if a function is recursive (part of a call graph SCC)
void updateStateOnAddr(const AddrStmt *addr)
virtual ~AbstractInterpretation()
Destructor.
bool isCmpBranchFeasible(const CmpStmt *cmpStmt, s64_t succ, AbstractState &as)
Check if cmpStmt with successor value succ is feasible; refine intervals in as accordingly.
virtual void handleCallSite(const ICFGNode *node)
Handle a call site node: dispatch to ext-call, direct-call, or indirect-call handling.
void updateStateOnRet(const RetPE *retPE)
void updateStateOnCopy(const CopyStmt *copy)
void propagateObjVarAbsVal(const ObjVar *var, const ICFGNode *defSite)
Propagate an ObjVar's abstract value from defSite to all its use-site ICFGNodes via SVFG.
std::deque< const FunObjVar * > collectProgEntryFuns()
Get all entry point functions (functions without callers)
void updateStateOnLoad(const LoadStmt *load)
void updateStateOnBinary(const BinaryOPStmt *binary)
Map< const ICFGNode *, AbstractState > abstractTrace
Set< const ICFGNode * > allAnalyzedNodes
virtual void setTopToObjInRecursion(const CallICFGNode *callnode)
Set all store targets and return value to TOP for a recursive call node.
virtual void handleRecursiveCall(const CallICFGNode *callNode)
Handle recursive call in TOP mode: set all stores and return value to TOP.
virtual void handleLoopOrRecursion(const ICFGCycleWTO *cycle, const CallICFGNode *caller=nullptr)
Handle a WTO cycle (loop or recursive function) using widening/narrowing iteration.
Map< s32_t, s32_t > _switch_lhsrhs_predicate
const SVFVar * getSVFVar(NodeID varId) const
Retrieve SVFVar given its ID; asserts if no such variable exists.
void updateStateOnCmp(const CmpStmt *cmp)
const AbstractValue & getAbstractValue(const ValVar *var)
Retrieve abstract value for a top-level variable at a given ICFG node.
static u32_t getVirtualMemAddress(u32_t idx)
The physical address starts with 0x7f...... + idx.
AbstractState narrowing(const AbstractState &other)
domain narrow with other, and return the narrowed domain
AbstractState widening(const AbstractState &other)
domain widen with other, and return the widened domain
AddressValue & getAddrs()
bool meet_with(const AddressValue &other)
Return a intersected AddressValue.
AddrSet::const_iterator begin() const
NodeID getRHSVarID() const
NodeID getLHSVarID() const
s64_t getIntNumeral() const
const FunObjVar * getFunction() const
Get function of this call node.
Definition CallGraph.h:191
bool hasIndCSCallees(const CallICFGNode *cs) const
Definition CallGraph.h:335
const FunctionSet & getIndCSCallees(const CallICFGNode *cs) const
Definition CallGraph.h:339
@ ICMP_SGT
signed greater than
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ ICMP_UGE
unsigned greater or equal
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ ICMP_ULE
unsigned less or equal
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ ICMP_NE
not equal
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
@ ICMP_ULT
unsigned less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ ICMP_SLT
signed less than
@ ICMP_UGT
unsigned greater than
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_SLE
signed less or equal
bool empty() const
Definition WorkList.h:161
bool isDeclaration() const
iterator begin()
Iterators.
const GEdgeSetTy & getInEdges() const
const SVFStmtList & getSVFStmts() const
Definition ICFGNode.h:115
ICFGEdge * getICFGEdge(const ICFGNode *src, const ICFGNode *dst, ICFGEdge::ICFGEdgeK kind)
Get a SVFG edge according to src and dst.
Definition ICFG.cpp:311
void updateCallGraph(CallGraph *callgraph)
update ICFG for indirect calls
Definition ICFG.cpp:427
FunEntryICFGNode * getFunEntryICFGNode(const FunObjVar *fun)
Add a function entry node.
Definition ICFG.cpp:242
GlobalICFGNode * getGlobalICFGNode() const
Definition ICFG.h:244
NodeID getBlkPtr() const
Definition IRGraph.h:255
void meet_with(const IntervalValue &other)
Return a intersected IntervalValue.
static BoundedInt minus_infinity()
Get minus infinity -inf.
static BoundedInt plus_infinity()
Get plus infinity +inf.
static IntervalValue top()
Create the IntervalValue [-inf, +inf].
const BoundedInt & lb() const
Return the lower bound.
static const OptionMap< u32_t > HandleRecur
recursion handling mode, Default: TOP
Definition Options.h:246
static const Option< u32_t > MaxFieldLimit
Maximum number of field derivations for an object.
Definition Options.h:35
static const Option< u32_t > WidenDelay
Definition Options.h:244
static const Option< bool > PStat
Definition Options.h:116
bool inSameCallGraphSCC(const FunObjVar *fun1, const FunObjVar *fun2)
Return TRUE if this edge is inside a PTACallGraph SCC, i.e., src node and dst node are in the same SC...
bool isInRecursion(const FunObjVar *fun) const
const Map< const FunObjVar *, const ICFGWTO * > & getFuncToWTO() const
Accessors for WTO data.
Definition PreAnalysis.h:91
const Set< const ICFGNode * > getUseSitesOfObjVar(const ObjVar *obj, const ICFGNode *node) const
Given an ObjVar and its def-site ICFGNode, find all use-site ICFGNodes.
AndersenWaveDiff * getPointerAnalysis() const
Accessors for Andersen's results.
Definition PreAnalysis.h:56
void initWTO()
Build WTO for each function using call graph SCC.
CallGraph * getCallGraph() const
Definition PreAnalysis.h:60
const CallSiteToFunPtrMap & getIndirectCallsites() const
Add/get indirect callsites.
Definition SVFIR.h:443
static SVFIR * getPAG(bool buildFromFile=false)
Singleton design here to make sure we only have one instance during any analysis.
Definition SVFIR.h:116
virtual void endClk()
Definition SVFStat.h:63
virtual void startClk()
Definition SVFStat.h:58
ICFGNode * getICFGNode() const
u32_t getByteSize() const
Definition SVFType.h:289
virtual const std::string & getName() const
Definition SVFValue.h:186
bool isExtCall(const FunObjVar *fun)
Definition SVFUtil.cpp:437
for isBitcode
Definition BasicTypes.h:68
u32_t NodeID
Definition GeneralType.h:56
s64_t APOffset
Definition GeneralType.h:60
signed short s16_t
Definition GeneralType.h:54
unsigned short u16_t
Definition GeneralType.h:53
WTONode< ICFG > ICFGSingletonWTO
Definition ICFGWTO.h:45
llvm::IRBuilder IRBuilder
Definition BasicTypes.h:74
signed s32_t
Definition GeneralType.h:48
unsigned u32_t
Definition GeneralType.h:47
signed long long s64_t
Definition GeneralType.h:50
WTOComponent< ICFG > ICFGWTOComp
Definition ICFGWTO.h:44