AbstractInterpretation.cpp
//===- AbstractInterpretation.cpp -- Abstract Interpretation -------------===//
//
// SVF: Static Value-Flow Analysis
//
// Copyright (C) <2013->  <Yulei Sui>
//

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.

// You should have received a copy of the GNU Affero General Public License
// along with this program.  If not, see <http://www.gnu.org/licenses/>.
//
//===----------------------------------------------------------------------===//


//
// Created on: Jan 10, 2024
// Author: Xiao Cheng, Jiawei Wang
//

#include "AE/Svfexe/AbstractInterpretation.h"
#include "AE/Svfexe/AbsExtAPI.h"
#include "SVFIR/SVFIR.h"
#include "Util/Options.h"
#include "Util/WorkList.h"
#include "Graphs/CallGraph.h"
#include "WPA/Andersen.h"
#include <cmath>

using namespace SVF;
using namespace SVFUtil;
using namespace z3;

/// Run abstract interpretation on the ICFG of the given module
void AbstractInterpretation::runOnModule(ICFG* _icfg)
{
    stat->startClk();
    icfg = _icfg;
    svfir = SVFIR::getPAG();
    utils = new AbsExtAPI(abstractTrace);

    // collect checkpoints (e.g. svf_assert callsites) before the analysis
    collectCheckPoints();

    analyse();
    checkPointAllSet();
    stat->endClk();
    stat->finializeStat();
    if (Options::PStat())
        stat->performStat();
    for (auto& detector: detectors)
        detector->reportBug();
}

/// Destructor: release statistics and the per-function WTOs
AbstractInterpretation::~AbstractInterpretation()
{
    delete stat;
    for (auto it: funcToWTO)
        delete it.second;
}

/// Recursively collect cycle heads from nested WTO components, mapping each
/// cycle-head ICFGNode to its ICFGCycleWTO for efficient lookup
void AbstractInterpretation::collectCycleHeads(const std::list<const ICFGWTOComp*>& comps)
{
    for (const ICFGWTOComp* comp : comps)
    {
        if (const ICFGCycleWTO* cycle = SVFUtil::dyn_cast<ICFGCycleWTO>(comp))
        {
            cycleHeadToCycle[cycle->head()->getICFGNode()] = cycle;
            // Recursively collect nested cycle heads
            collectCycleHeads(cycle->getWTOComponents());
        }
    }
}

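// Illustrative example (ours, not from the SVF sources): for nested source
// loops the WTO nests cycles, and collectCycleHeads records both heads:
//
//     while (i < n)      // outer cycle head -> outer ICFGCycleWTO
//     {
//         while (j < m)  // inner cycle head -> nested ICFGCycleWTO
//             j++;
//     }
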
/// Compute IWTO for each function partition entry
void AbstractInterpretation::initWTO()
{
    AndersenWaveDiff* ander = AndersenWaveDiff::createAndersenWaveDiff(svfir);
    // Detect if the call graph has cycles by finding its strongly connected components (SCC)
    auto* callGraphScc = ander->getCallGraphSCC();
    callGraphScc->find();
    CallGraph* callGraph = ander->getCallGraph();

    // Iterate through the call graph
    for (auto it = callGraph->begin(); it != callGraph->end(); it++)
    {
        // Check if the current function is part of a cycle
        if (callGraphScc->isInCycle(it->second->getId()))
            recursiveFuns.insert(it->second->getFunction()); // Mark the function as recursive

        // Calculate ICFGWTO for each function/recursion
        const FunObjVar* fun = it->second->getFunction();
        if (fun->isDeclaration())
            continue;

        NodeID repNodeId = callGraphScc->repNode(it->second->getId());
        auto cgSCCNodes = callGraphScc->subNodes(repNodeId);

        // Identify if this node is an SCC entry (nodes who have incoming edges
        // from nodes outside the SCC). Also identify non-recursive callsites.
        bool isEntry = false;
        if (it->second->getInEdges().empty())
            isEntry = true;
        for (auto inEdge: it->second->getInEdges())
        {
            NodeID srcNodeId = inEdge->getSrcID();
            if (!cgSCCNodes.test(srcNodeId))
            {
                isEntry = true;
                const CallICFGNode* callSite = nullptr;
                if (inEdge->isDirectCallEdge())
                    callSite = *(inEdge->getDirectCalls().begin());
                else if (inEdge->isIndirectCallEdge())
                    callSite = *(inEdge->getIndirectCalls().begin());
                else
                    assert(false && "CallGraphEdge must "
                                    "be either direct or indirect!");

                nonRecursiveCallSites.insert(
                    {callSite, inEdge->getDstNode()->getFunction()->getId()});
            }
        }

        // Compute IWTO for the function partition entered from each partition entry
        if (isEntry)
        {
            Set<const FunObjVar*> funcScc;
            for (const auto& node: cgSCCNodes)
            {
                funcScc.insert(callGraph->getGNode(node)->getFunction());
            }
            ICFGWTO* iwto = new ICFGWTO(icfg, icfg->getFunEntryICFGNode(fun), funcScc);
            iwto->init();
            funcToWTO[it->second->getFunction()] = iwto;
        }
    }

    // Build cycleHeadToCycle map for all functions
    // This maps cycle head nodes to their corresponding WTO cycles for efficient lookup
    for (auto& [func, wto] : funcToWTO)
    {
        collectCycleHeads(wto->getWTOComponents());
    }
}

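// Illustrative example (ours, not from the SVF sources): for mutual recursion
//   main -> f, f -> g, g -> f
// {f, g} is one call-graph SCC, so both functions land in recursiveFuns.
// The call main->f enters the SCC from outside, so it is recorded in
// nonRecursiveCallSites and f becomes an SCC entry whose IWTO covers the
// whole {f, g} partition.
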
/// Program entry: compute WTOs, handle the global node, then start from main
void AbstractInterpretation::analyse()
{
    initWTO();
    // handle Global ICFGNode of SVFModule
    handleGlobalNode();
    getAbsStateFromTrace(icfg->getGlobalICFGNode())[SVFIR::getPAG()->getBlkPtr()] =
        IntervalValue::top();
    if (const CallGraphNode* cgn = svfir->getCallGraph()->getCallGraphNode("main"))
    {
        // Use worklist-based function handling instead of recursive WTO component handling
        const ICFGNode* mainEntry = icfg->getFunEntryICFGNode(cgn->getFunction());
        handleFunction(mainEntry);
    }
}

/// Global ICFGNode is handled at the entry of the program
void AbstractInterpretation::handleGlobalNode()
{
    const ICFGNode* node = icfg->getGlobalICFGNode();
    abstractTrace[node] = AbstractState();
    // Global Node, we just need to handle addr, load, store, copy and gep
    for (const SVFStmt* stmt: node->getSVFStmts())
    {
        handleSVFStatement(stmt);
    }
}

/// Merge the abstract states of all processed predecessors of icfgNode.
/// Returns false if no predecessor has a state (infeasible block).
bool AbstractInterpretation::mergeStatesFromPredecessors(const ICFGNode* icfgNode)
{
    std::vector<AbstractState> workList;
    AbstractState preAs;
    for (auto& edge: icfgNode->getInEdges())
    {
        if (abstractTrace.find(edge->getSrcNode()) != abstractTrace.end())
        {
            if (const IntraCFGEdge* intraCfgEdge =
                        SVFUtil::dyn_cast<IntraCFGEdge>(edge))
            {
                AbstractState tmpEs = abstractTrace[edge->getSrcNode()];
                if (intraCfgEdge->getCondition())
                {
                    if (isBranchFeasible(intraCfgEdge, tmpEs))
                    {
                        workList.push_back(tmpEs);
                    }
                    // otherwise the branch is infeasible: contribute nothing
                }
                else
                {
                    workList.push_back(tmpEs);
                }
            }
            else if (const CallCFGEdge* callCfgEdge =
                         SVFUtil::dyn_cast<CallCFGEdge>(edge))
            {
                // context insensitive implementation
                workList.push_back(
                    abstractTrace[callCfgEdge->getSrcNode()]);
            }
            else if (const RetCFGEdge* retCfgEdge =
                         SVFUtil::dyn_cast<RetCFGEdge>(edge))
            {
                // Only include return edge if the corresponding callsite was processed
                // (skipped recursive callsites in WIDEN_ONLY/WIDEN_NARROW won't have state)
                const RetICFGNode* retNode = SVFUtil::dyn_cast<RetICFGNode>(icfgNode);
                if (hasAbsStateFromTrace(retNode->getCallICFGNode()))
                {
                    workList.push_back(abstractTrace[retCfgEdge->getSrcNode()]);
                }
            }
            else
                assert(false && "Unhandled ICFGEdge type encountered!");
        }
    }
    if (workList.empty())
    {
        return false;
    }
    else
    {
        while (!workList.empty())
        {
            preAs.joinWith(workList.back());
            workList.pop_back();
        }
        // Has ES on the in edges - feasible block: update the post abstract state
        abstractTrace[icfgNode] = preAs;
        return true;
    }
}

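// Illustrative example (ours, not from the SVF sources): joining predecessor
// states over-approximates all incoming paths, e.g. x -> [0,0] from the
// then-branch and x -> [5,5] from the else-branch merge to x -> [0,5] at
// the join point.
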
/// Return true if the branch "cmpStmt == succ" is feasible under abstract
/// state as; refine as with the branch constraint where possible.
bool AbstractInterpretation::isCmpBranchFeasible(const CmpStmt* cmpStmt, s64_t succ,
        AbstractState& as)
{
    AbstractState new_es = as;
    // get cmp stmt's op0, op1, and predicate
    NodeID op0 = cmpStmt->getOpVarID(0);
    NodeID op1 = cmpStmt->getOpVarID(1);
    NodeID res_id = cmpStmt->getResID();
    s32_t predicate = cmpStmt->getPredicate();
    // if op0 or op1 is nullptr, no need to change value, just copy the state
    if (op0 == IRGraph::NullPtr || op1 == IRGraph::NullPtr)
    {
        as = new_es;
        return true;
    }
    // if op0 or op1 is undefined, return;
    // skip address compare
    if (new_es.inVarToAddrsTable(op0) || new_es.inVarToAddrsTable(op1))
    {
        as = new_es;
        return true;
    }
    const LoadStmt* load_op0 = nullptr;
    const LoadStmt* load_op1 = nullptr;
    // get '%1 = load i32 s', and load inst may not exist
    SVFVar* loadVar0 = svfir->getGNode(op0);
    if (!loadVar0->getInEdges().empty())
    {
        SVFStmt* loadVar0InStmt = *loadVar0->getInEdges().begin();
        if (const LoadStmt* loadStmt = SVFUtil::dyn_cast<LoadStmt>(loadVar0InStmt))
        {
            load_op0 = loadStmt;
        }
        else if (const CopyStmt* copyStmt = SVFUtil::dyn_cast<CopyStmt>(loadVar0InStmt))
        {
            loadVar0 = svfir->getGNode(copyStmt->getRHSVarID());
            if (!loadVar0->getInEdges().empty())
            {
                SVFStmt* loadVar0InStmt2 = *loadVar0->getInEdges().begin();
                if (const LoadStmt* loadStmt = SVFUtil::dyn_cast<LoadStmt>(loadVar0InStmt2))
                {
                    load_op0 = loadStmt;
                }
            }
        }
    }

    SVFVar* loadVar1 = svfir->getGNode(op1);
    if (!loadVar1->getInEdges().empty())
    {
        SVFStmt* loadVar1InStmt = *loadVar1->getInEdges().begin();
        if (const LoadStmt* loadStmt = SVFUtil::dyn_cast<LoadStmt>(loadVar1InStmt))
        {
            load_op1 = loadStmt;
        }
        else if (const CopyStmt* copyStmt = SVFUtil::dyn_cast<CopyStmt>(loadVar1InStmt))
        {
            loadVar1 = svfir->getGNode(copyStmt->getRHSVarID());
            if (!loadVar1->getInEdges().empty())
            {
                SVFStmt* loadVar1InStmt2 = *loadVar1->getInEdges().begin();
                if (const LoadStmt* loadStmt = SVFUtil::dyn_cast<LoadStmt>(loadVar1InStmt2))
                {
                    load_op1 = loadStmt;
                }
            }
        }
    }
    // for const X const, we may get concrete resVal instantly
    // for var X const, we may get [0,1] if the intersection of var and const is not empty set
    IntervalValue resVal = new_es[res_id].getInterval();
    resVal.meet_with(IntervalValue(succ, succ));
    // If Var X const generates bottom value, it means this branch path is not feasible.
    if (resVal.isBottom())
    {
        return false;
    }

    bool b0 = new_es[op0].getInterval().is_numeral();
    bool b1 = new_es[op1].getInterval().is_numeral();

    // if const X var, we should reverse op0 and op1.
    if (b0 && !b1)
    {
        std::swap(op0, op1);
        std::swap(load_op0, load_op1);
        predicate = _switch_lhsrhs_predicate[predicate];
    }
    else
    {
        // if var X var, we cannot preset the branch condition to infer the intervals of var0,var1
        if (!b0 && !b1)
        {
            as = new_es;
            return true;
        }
        // if const X const, we can instantly get the resVal
        else if (b0 && b1)
        {
            as = new_es;
            return true;
        }
    }
    // if cmp is 'var X const == false', we should reverse predicate 'var X' const == true'
    // X' is reverse predicate of X
    if (succ == 0)
    {
        predicate = _reverse_predicate[predicate];
    }
    // change interval range according to the compare predicate
    AddressValue addrs;
    if (load_op0 && new_es.inVarToAddrsTable(load_op0->getRHSVarID()))
        addrs = new_es[load_op0->getRHSVarID()].getAddrs();

    IntervalValue &lhs = new_es[op0].getInterval(), &rhs = new_es[op1].getInterval();
    switch (predicate)
    {
    case CmpStmt::Predicate::ICMP_EQ:
    case CmpStmt::Predicate::FCMP_OEQ:
    case CmpStmt::Predicate::FCMP_UEQ:
    {
        // Var == Const, so [var.lb, var.ub].meet_with(const)
        lhs.meet_with(rhs);
        // if lhs is register value, we should also change its mem obj
        for (const auto& addr: addrs)
        {
            NodeID objId = new_es.getIDFromAddr(addr);
            if (new_es.inAddrToValTable(objId))
            {
                new_es.load(addr).meet_with(rhs);
            }
        }
        break;
    }
    case CmpStmt::Predicate::ICMP_NE:
    case CmpStmt::Predicate::FCMP_ONE:
    case CmpStmt::Predicate::FCMP_UNE:
        // Complement set
        break;
    case CmpStmt::Predicate::ICMP_UGT:
    case CmpStmt::Predicate::ICMP_SGT:
    case CmpStmt::Predicate::FCMP_OGT:
    case CmpStmt::Predicate::FCMP_UGT:
        // Var > Const, so [var.lb, var.ub].meet_with([Const+1, +INF])
        lhs.meet_with(IntervalValue(rhs.lb() + 1, IntervalValue::plus_infinity()));
        // if lhs is register value, we should also change its mem obj
        for (const auto& addr: addrs)
        {
            NodeID objId = new_es.getIDFromAddr(addr);
            if (new_es.inAddrToValTable(objId))
            {
                new_es.load(addr).meet_with(
                    IntervalValue(rhs.lb() + 1, IntervalValue::plus_infinity()));
            }
        }
        break;
    case CmpStmt::Predicate::ICMP_UGE:
    case CmpStmt::Predicate::ICMP_SGE:
    case CmpStmt::Predicate::FCMP_OGE:
    case CmpStmt::Predicate::FCMP_UGE:
    {
        // Var >= Const, so [var.lb, var.ub].meet_with([Const, +INF])
        lhs.meet_with(IntervalValue(rhs.lb(), IntervalValue::plus_infinity()));
        // if lhs is register value, we should also change its mem obj
        for (const auto& addr: addrs)
        {
            NodeID objId = new_es.getIDFromAddr(addr);
            if (new_es.inAddrToValTable(objId))
            {
                new_es.load(addr).meet_with(
                    IntervalValue(rhs.lb(), IntervalValue::plus_infinity()));
            }
        }
        break;
    }
    case CmpStmt::Predicate::ICMP_ULT:
    case CmpStmt::Predicate::ICMP_SLT:
    case CmpStmt::Predicate::FCMP_OLT:
    case CmpStmt::Predicate::FCMP_ULT:
    {
        // Var < Const, so [var.lb, var.ub].meet_with([-INF, const.ub-1])
        lhs.meet_with(IntervalValue(IntervalValue::minus_infinity(), rhs.ub() - 1));
        // if lhs is register value, we should also change its mem obj
        for (const auto& addr: addrs)
        {
            NodeID objId = new_es.getIDFromAddr(addr);
            if (new_es.inAddrToValTable(objId))
            {
                new_es.load(addr).meet_with(
                    IntervalValue(IntervalValue::minus_infinity(), rhs.ub() - 1));
            }
        }
        break;
    }
    case CmpStmt::Predicate::ICMP_ULE:
    case CmpStmt::Predicate::ICMP_SLE:
    case CmpStmt::Predicate::FCMP_OLE:
    case CmpStmt::Predicate::FCMP_ULE:
    {
        // Var <= Const, so [var.lb, var.ub].meet_with([-INF, const.ub])
        lhs.meet_with(IntervalValue(IntervalValue::minus_infinity(), rhs.ub()));
        // if lhs is register value, we should also change its mem obj
        for (const auto& addr: addrs)
        {
            NodeID objId = new_es.getIDFromAddr(addr);
            if (new_es.inAddrToValTable(objId))
            {
                new_es.load(addr).meet_with(
                    IntervalValue(IntervalValue::minus_infinity(), rhs.ub()));
            }
        }
        break;
    }
    case CmpStmt::Predicate::FCMP_FALSE:
        break;
    case CmpStmt::Predicate::FCMP_TRUE:
        break;
    default:
        assert(false && "implement this part");
        abort();
    }
    as = new_es;
    return true;
}

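// Illustrative example (ours, not from the SVF sources): for "if (x < 5)"
// with x -> [0,10], the true successor meets x with [-inf,4], refining it
// to [0,4]; with x -> [8,10] the meet of the comparison result is bottom,
// so the branch is reported infeasible and the successor is pruned.
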
/// Return true if the switch branch "var == succ" is feasible under abstract
/// state as; refine as with the case value where possible.
bool AbstractInterpretation::isSwitchBranchFeasible(const SVFVar* var, s64_t succ,
        AbstractState& as)
{
    AbstractState new_es = as;
    IntervalValue& switch_cond = new_es[var->getId()].getInterval();
    s64_t value = succ;
    FIFOWorkList<const SVFStmt*> workList;
    for (SVFStmt* cmpVarInStmt: var->getInEdges())
    {
        workList.push(cmpVarInStmt);
    }
    switch_cond.meet_with(IntervalValue(value, value));
    if (switch_cond.isBottom())
    {
        return false;
    }
    while (!workList.empty())
    {
        const SVFStmt* stmt = workList.pop();
        if (SVFUtil::isa<CopyStmt>(stmt))
        {
            IntervalValue& copy_cond = new_es[var->getId()].getInterval();
            copy_cond.meet_with(IntervalValue(value, value));
        }
        else if (const LoadStmt* load = SVFUtil::dyn_cast<LoadStmt>(stmt))
        {
            if (new_es.inVarToAddrsTable(load->getRHSVarID()))
            {
                AddressValue& addrs = new_es[load->getRHSVarID()].getAddrs();
                for (const auto& addr: addrs)
                {
                    NodeID objId = new_es.getIDFromAddr(addr);
                    if (new_es.inAddrToValTable(objId))
                    {
                        new_es.load(addr).meet_with(IntervalValue(value, value));
                    }
                }
            }
        }
    }
    as = new_es;
    return true;
}

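// Illustrative example (ours, not from the SVF sources): for
// "switch (x) { case 2: ... }" the edge labelled with case value 2 meets
// x with [2,2]; if x -> [5,9], the meet is bottom and the case edge is
// infeasible. The worklist also narrows the memory object behind x when
// the condition was loaded from memory.
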
/// Dispatch branch feasibility checking on an intra-procedural conditional edge
bool AbstractInterpretation::isBranchFeasible(const IntraCFGEdge* intraEdge,
        AbstractState& as)
{
    const SVFVar* cmpVar = intraEdge->getCondition();
    if (cmpVar->getInEdges().empty())
    {
        return isSwitchBranchFeasible(cmpVar,
                                      intraEdge->getSuccessorCondValue(), as);
    }
    else
    {
        SVFStmt* cmpVarInStmt = *cmpVar->getInEdges().begin();
        if (const CmpStmt* cmpStmt = SVFUtil::dyn_cast<CmpStmt>(cmpVarInStmt))
        {
            return isCmpBranchFeasible(cmpStmt,
                                       intraEdge->getSuccessorCondValue(), as);
        }
        else
        {
            return isSwitchBranchFeasible(
                       cmpVar, intraEdge->getSuccessorCondValue(), as);
        }
    }
}

/// Handle instructions in an SVF basic block (a singleton WTO component)
void AbstractInterpretation::handleSingletonWTO(const ICFGSingletonWTO* icfgSingletonWto)
{
    const ICFGNode* node = icfgSingletonWto->getICFGNode();
    stat->getBlockTrace()++;

    // handle SVF Stmt
    for (const SVFStmt* stmt: node->getSVFStmts())
    {
        handleSVFStatement(stmt);
    }
    // inlining the callee by calling handleFunc for the callee function
    if (const CallICFGNode* callnode = SVFUtil::dyn_cast<CallICFGNode>(node))
    {
        handleCallSite(callnode);
    }
    for (auto& detector: detectors)
        detector->detect(getAbsStateFromTrace(node), node);
    stat->countStateSize();
}

/// Process a single ICFG node: merge predecessor states, transfer its SVF
/// statements, handle callsites, and run detectors.
/// Returns false when the node is infeasible or a fixpoint is reached.
bool AbstractInterpretation::handleICFGNode(const ICFGNode* node)
{
    // Store the previous state for fixpoint detection
    AbstractState prevState;
    bool hadPrevState = hasAbsStateFromTrace(node);
    if (hadPrevState)
        prevState = abstractTrace[node];

    // For function entry nodes, initialize state from caller or global
    bool isFunEntry = SVFUtil::isa<FunEntryICFGNode>(node);
    if (isFunEntry)
    {
        // Try to merge from predecessors first (handles call edges)
        if (!mergeStatesFromPredecessors(node))
        {
            // No predecessors with state - initialize from caller or global
            if (!callSiteStack.empty())
            {
                // Get state from the most recent call site
                const CallICFGNode* caller = callSiteStack.back();
                if (hasAbsStateFromTrace(caller))
                {
                    abstractTrace[node] = abstractTrace[caller];
                }
                else
                {
                    abstractTrace[node] = AbstractState();
                }
            }
            else
            {
                // This is the main function entry, inherit from global node
                if (hasAbsStateFromTrace(icfg->getGlobalICFGNode()))
                {
                    abstractTrace[node] = abstractTrace[icfg->getGlobalICFGNode()];
                }
                else
                {
                    abstractTrace[node] = AbstractState();
                }
            }
        }
    }
    else
    {
        // Merge states from predecessors
        if (!mergeStatesFromPredecessors(node))
            return false;
    }

    stat->getBlockTrace()++;

    // Handle SVF statements
    for (const SVFStmt* stmt: node->getSVFStmts())
    {
        handleSVFStatement(stmt);
    }

    // Handle call sites
    if (const CallICFGNode* callNode = SVFUtil::dyn_cast<CallICFGNode>(node))
    {
        handleCallSite(callNode);
    }

    // Run detectors
    for (auto& detector: detectors)
        detector->detect(getAbsStateFromTrace(node), node);
    stat->countStateSize();

    // Check if state changed (for fixpoint detection)
    // For entry nodes on first visit, always return true to process successors
    if (isFunEntry && !hadPrevState)
        return true;

    if (abstractTrace[node] == prevState)
        return false;

    return true;
}

/// Collect the intra-procedural successors of a node; for a call node, also
/// shortcut directly to its RetICFGNode
std::vector<const ICFGNode*> AbstractInterpretation::getNextNodes(const ICFGNode* node) const
{
    std::vector<const ICFGNode*> outEdges;
    for (const ICFGEdge* edge : node->getOutEdges())
    {
        const ICFGNode* dst = edge->getDstNode();
        // Only nodes inside the same function are included
        if (dst->getFun() == node->getFun())
        {
            outEdges.push_back(dst);
        }
    }
    if (const CallICFGNode* callNode = SVFUtil::dyn_cast<CallICFGNode>(node))
    {
        // Shortcut to the RetICFGNode
        const ICFGNode* retNode = callNode->getRetICFGNode();
        outEdges.push_back(retNode);
    }
    return outEdges;
}

/// Collect the successors of a WTO cycle that lie outside the cycle itself
std::vector<const ICFGNode*> AbstractInterpretation::getNextNodesOfCycle(const ICFGCycleWTO* cycle) const
{
    Set<const ICFGNode*> cycleNodes;
    // Insert the head of the cycle and all component nodes
    cycleNodes.insert(cycle->head()->getICFGNode());
    for (const ICFGWTOComp* comp : cycle->getWTOComponents())
    {
        if (const ICFGSingletonWTO* singleton = SVFUtil::dyn_cast<ICFGSingletonWTO>(comp))
        {
            cycleNodes.insert(singleton->getICFGNode());
        }
        else if (const ICFGCycleWTO* subCycle = SVFUtil::dyn_cast<ICFGCycleWTO>(comp))
        {
            cycleNodes.insert(subCycle->head()->getICFGNode());
        }
    }

    std::vector<const ICFGNode*> outEdges;

    // Check head's successors
    std::vector<const ICFGNode*> nextNodes = getNextNodes(cycle->head()->getICFGNode());
    for (const ICFGNode* nextNode : nextNodes)
    {
        // Only nodes that point outside the cycle are included
        if (cycleNodes.find(nextNode) == cycleNodes.end())
        {
            outEdges.push_back(nextNode);
        }
    }

    // Check each component's successors
    for (const ICFGWTOComp* comp : cycle->getWTOComponents())
    {
        if (const ICFGSingletonWTO* singleton = SVFUtil::dyn_cast<ICFGSingletonWTO>(comp))
        {
            std::vector<const ICFGNode*> compNextNodes = getNextNodes(singleton->getICFGNode());
            for (const ICFGNode* nextNode : compNextNodes)
            {
                if (cycleNodes.find(nextNode) == cycleNodes.end())
                {
                    outEdges.push_back(nextNode);
                }
            }
        }
        // Skip inner cycles - they are handled within the outer cycle
    }
    return outEdges;
}

/// Worklist-based handling of all ICFG nodes of a function, starting from
/// its entry node; cycle heads are expanded via handleLoopOrRecursion()
void AbstractInterpretation::handleFunction(const ICFGNode* funEntry)
{
    FIFOWorkList<const ICFGNode*> worklist;
    worklist.push(funEntry);

    while (!worklist.empty())
    {
        const ICFGNode* node = worklist.pop();

        // Check if this node is a cycle head
        if (cycleHeadToCycle.find(node) != cycleHeadToCycle.end())
        {
            const ICFGCycleWTO* cycle = cycleHeadToCycle[node];
            handleLoopOrRecursion(cycle);

            // Push nodes outside the cycle to the worklist
            std::vector<const ICFGNode*> cycleNextNodes = getNextNodesOfCycle(cycle);
            for (const ICFGNode* nextNode : cycleNextNodes)
            {
                worklist.push(nextNode);
            }
        }
        else
        {
            // Handle regular node
            if (!handleICFGNode(node))
            {
                // Fixpoint reached or infeasible, skip successors
                continue;
            }

            // Push successor nodes to the worklist
            std::vector<const ICFGNode*> nextNodes = getNextNodes(node);
            for (const ICFGNode* nextNode : nextNodes)
            {
                worklist.push(nextNode);
            }
        }
    }
}

void AbstractInterpretation::handleCallSite(const ICFGNode* node)
{
    if (const CallICFGNode* callNode = SVFUtil::dyn_cast<CallICFGNode>(node))
    {
        if (isExtCall(callNode))
        {
            handleExtCall(callNode);
        }
        else
        {
            // Handle both direct and indirect calls uniformly
            handleFunCall(callNode);
        }
    }
    else
        assert(false && "it is not call node");
}

bool AbstractInterpretation::isExtCall(const CallICFGNode* callNode)
{
    return SVFUtil::isExtCall(callNode->getCalledFunction());
}

void AbstractInterpretation::handleExtCall(const CallICFGNode* callNode)
{
    callSiteStack.push_back(callNode);
    utils->handleExtAPI(callNode);
    for (auto& detector : detectors)
    {
        detector->handleStubFunctions(callNode);
    }
    callSiteStack.pop_back();
}

/// Check if a function is recursive (part of a call graph SCC)
bool AbstractInterpretation::isRecursiveFun(const FunObjVar* fun)
{
    return recursiveFuns.find(fun) != recursiveFuns.end();
}

/// Check if a call node calls a recursive function
bool AbstractInterpretation::isRecursiveCall(const CallICFGNode* callNode)
{
    const FunObjVar* callfun = callNode->getCalledFunction();
    if (!callfun)
        return false;
    else
        return isRecursiveFun(callfun);
}

/// Handle a recursive call in TOP mode: set all stores and the return value to TOP
void AbstractInterpretation::recursiveCallPass(const CallICFGNode* callNode)
{
    AbstractState& as = getAbsStateFromTrace(callNode);
    setTopToObjInRecursion(callNode);
    const RetICFGNode* retNode = callNode->getRetICFGNode();
    if (retNode->getSVFStmts().size() > 0)
    {
        if (const RetPE* retPE = SVFUtil::dyn_cast<RetPE>(*retNode->getSVFStmts().begin()))
        {
            if (!retPE->getLHSVar()->isPointer() &&
                    !retPE->getLHSVar()->isConstDataOrAggDataButNotNullPtr())
            {
                as[retPE->getLHSVarID()] = IntervalValue::top();
            }
        }
    }
    abstractTrace[retNode] = as;
}

/// Get callee function: directly for direct calls, via pointer analysis
/// results for indirect calls
const FunObjVar* AbstractInterpretation::getCallee(const CallICFGNode* callNode)
{
    // Direct call: get callee directly from call node
    if (const FunObjVar* callee = callNode->getCalledFunction())
        return callee;

    // Indirect call: resolve callee through pointer analysis
    const CallSiteToFunPtrMap& callsiteMaps = svfir->getIndirectCallsites();
    auto it = callsiteMaps.find(callNode);
    if (it == callsiteMaps.end())
        return nullptr;

    NodeID call_id = it->second;
    if (!hasAbsStateFromTrace(callNode))
        return nullptr;

    AbstractState& as = getAbsStateFromTrace(callNode);
    if (!as.inVarToAddrsTable(call_id))
        return nullptr;

    AbstractValue& Addrs = as[call_id];
    if (Addrs.getAddrs().empty())
        return nullptr;

    u32_t addr = *Addrs.getAddrs().begin();
    SVFVar* func_var = svfir->getGNode(as.getIDFromAddr(addr));
    return SVFUtil::dyn_cast<FunObjVar>(func_var);
}

/// Check if a call is a recursive callsite (within the same SCC, not an
/// entry call from outside the SCC)
bool AbstractInterpretation::isRecursiveCallSite(const CallICFGNode* callNode, const FunObjVar* callee)
{
    if (!callee)
        return false;

    // Non-recursive function: never skip, always inline
    if (!isRecursiveFun(callee))
        return false;

    // For recursive functions, skip only recursive callsites (within same SCC).
    // Entry calls (from outside SCC) are not skipped - they are inlined so that
    // handleLoopOrRecursion() can analyze the function body.
    // This applies uniformly to all modes (TOP/WIDEN_ONLY/WIDEN_NARROW).
    return nonRecursiveCallSites.find({callNode, callee->getId()}) ==
           nonRecursiveCallSites.end();
}

/// Check if narrowing should be applied: always for regular loops,
/// mode-dependent for recursion
bool AbstractInterpretation::shouldApplyNarrowing(const FunObjVar* fun)
{
    // Non-recursive functions (regular loops): always apply narrowing
    if (!isRecursiveFun(fun))
        return true;

    // Recursive functions: WIDEN_NARROW applies narrowing, WIDEN_ONLY does not.
    // TOP mode exits early in handleLoopOrRecursion, so it should not reach here.
    switch (Options::HandleRecur())
    {
    case TOP:
        assert(false && "TOP mode should not reach narrowing phase for recursive functions");
        return false;
    case WIDEN_ONLY:
        return false; // Skip narrowing for recursive functions
    case WIDEN_NARROW:
        return true;  // Apply narrowing for recursive functions
    default:
        assert(false && "Unknown recursion handling mode");
        return false;
    }
}

/// Handle a direct or indirect call: get the callee, process the callee's
/// body, and continue from the return node
void AbstractInterpretation::handleFunCall(const CallICFGNode* callNode)
{
    // Skip recursive callsites (within SCC); entry calls are not skipped
    if (skipRecursiveCall(callNode))
        return;

    const FunObjVar* callee = getCallee(callNode);
    if (!callee)
        return;

    callSiteStack.push_back(callNode);

    // Inline the callee by running the worklist over its ICFG nodes
    handleFunction(icfg->getFunEntryICFGNode(callee));

    callSiteStack.pop_back();
    const RetICFGNode* retNode = callNode->getRetICFGNode();
    handleICFGNode(retNode);
}

/// Skip recursive callsites (within the same SCC); entry calls from outside
/// the SCC are not skipped
bool AbstractInterpretation::skipRecursiveCall(const CallICFGNode* callNode)
{
    return isRecursiveCallSite(callNode, getCallee(callNode));
}

/// Handle a WTO cycle (a loop or a recursion component): iterate the cycle
/// head and body until a widening/narrowing fixpoint is reached
void AbstractInterpretation::handleLoopOrRecursion(const ICFGCycleWTO* cycle)
{
    const ICFGNode* cycle_head = cycle->head()->getICFGNode();

    // TOP mode for recursive function cycles: use recursiveCallPass to set
    // all stores and return value to TOP, maintaining original semantics
    if (Options::HandleRecur() == TOP && isRecursiveFun(cycle_head->getFun()))
    {
        // Get the call node from callSiteStack (the call that entered this function)
        if (!callSiteStack.empty())
        {
            const CallICFGNode* callNode = callSiteStack.back();
            recursiveCallPass(callNode);
        }
        return;
    }

    // WIDEN_ONLY / WIDEN_NARROW modes (and regular loops): iterate until fixpoint
    bool increasing = true;
    u32_t widen_delay = Options::WidenDelay();

    for (u32_t cur_iter = 0;; cur_iter++)
    {
        // Get the abstract state before processing the cycle head
        AbstractState pre_as = hasAbsStateFromTrace(cycle_head)
                               ? abstractTrace[cycle_head]
                               : AbstractState();

        // Process the cycle head node
        handleICFGNode(cycle_head);

        // Start widening or narrowing if cur_iter >= widen delay threshold
        if (cur_iter >= widen_delay)
        {
            if (increasing)
            {
                // Apply widening operator
                abstractTrace[cycle_head] = pre_as.widening(abstractTrace[cycle_head]);

                if (abstractTrace[cycle_head] == pre_as)
                {
                    // Widening fixpoint reached, switch to narrowing phase
                    increasing = false;
                    continue;
                }
            }
            else
            {
                // Narrowing phase - check if narrowing should be applied
                if (!shouldApplyNarrowing(cycle_head->getFun()))
                {
                    break;
                }

                // Apply narrowing
                abstractTrace[cycle_head] = pre_as.narrowing(abstractTrace[cycle_head]);
                if (abstractTrace[cycle_head] == pre_as)
                {
                    // Narrowing fixpoint reached, exit loop
                    break;
                }
            }
        }

        // Process cycle body components
        for (const ICFGWTOComp* comp : cycle->getWTOComponents())
        {
            if (const ICFGSingletonWTO* singleton = SVFUtil::dyn_cast<ICFGSingletonWTO>(comp))
            {
                handleICFGNode(singleton->getICFGNode());
            }
            else if (const ICFGCycleWTO* subCycle = SVFUtil::dyn_cast<ICFGCycleWTO>(comp))
            {
                // Handle nested cycle recursively
                handleLoopOrRecursion(subCycle);
            }
        }
    }
}

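// Illustrative trace (ours, not from the SVF sources), for
// "for (i = 0; i < 10; i++)": plain iteration grows i as [0,0], [0,1],
// [0,2], ... ; once cur_iter reaches Options::WidenDelay(), widening jumps
// i to [0,+inf] so the ascending phase terminates, and the narrowing phase
// (if enabled by shouldApplyNarrowing) refines i back to [0,10] using the
// loop guard.
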
void AbstractInterpretation::handleSVFStatement(const SVFStmt* stmt)
{
    if (const AddrStmt* addr = SVFUtil::dyn_cast<AddrStmt>(stmt))
    {
        updateStateOnAddr(addr);
    }
    else if (const BinaryOPStmt* binary = SVFUtil::dyn_cast<BinaryOPStmt>(stmt))
    {
        updateStateOnBinary(binary);
    }
    else if (const CmpStmt* cmp = SVFUtil::dyn_cast<CmpStmt>(stmt))
    {
        updateStateOnCmp(cmp);
    }
    else if (SVFUtil::isa<UnaryOPStmt>(stmt))
    {
    }
    else if (SVFUtil::isa<BranchStmt>(stmt))
    {
        // branch stmt is handled in hasBranchES
    }
    else if (const LoadStmt* load = SVFUtil::dyn_cast<LoadStmt>(stmt))
    {
        updateStateOnLoad(load);
    }
    else if (const StoreStmt* store = SVFUtil::dyn_cast<StoreStmt>(stmt))
    {
        updateStateOnStore(store);
    }
    else if (const CopyStmt* copy = SVFUtil::dyn_cast<CopyStmt>(stmt))
    {
        updateStateOnCopy(copy);
    }
    else if (const GepStmt* gep = SVFUtil::dyn_cast<GepStmt>(stmt))
    {
        updateStateOnGep(gep);
    }
    else if (const SelectStmt* select = SVFUtil::dyn_cast<SelectStmt>(stmt))
    {
        updateStateOnSelect(select);
    }
    else if (const PhiStmt* phi = SVFUtil::dyn_cast<PhiStmt>(stmt))
    {
        updateStateOnPhi(phi);
    }
    else if (const CallPE* callPE = SVFUtil::dyn_cast<CallPE>(stmt))
    {
        // To handle Call Edge
        updateStateOnCall(callPE);
    }
    else if (const RetPE* retPE = SVFUtil::dyn_cast<RetPE>(stmt))
    {
        updateStateOnRet(retPE);
    }
    else
        assert(false && "implement this part");
    // NullPtr is index 0, it should not be changed
    assert(!getAbsStateFromTrace(stmt->getICFGNode())[IRGraph::NullPtr].isInterval() &&
           !getAbsStateFromTrace(stmt->getICFGNode())[IRGraph::NullPtr].isAddr());
}

/// Set all store values in a recursive function to TOP (used in TOP mode)
void AbstractInterpretation::setTopToObjInRecursion(const CallICFGNode* callNode)
{
    AbstractState& as = getAbsStateFromTrace(callNode);
    const RetICFGNode* retNode = callNode->getRetICFGNode();
    if (retNode->getSVFStmts().size() > 0)
    {
        if (const RetPE* retPE = SVFUtil::dyn_cast<RetPE>(*retNode->getSVFStmts().begin()))
        {
            // Non-pointer, non-constant return values are approximated by TOP
            if (!retPE->getLHSVar()->isPointer() && !retPE->getLHSVar()->isConstDataOrAggDataButNotNullPtr())
                as[retPE->getLHSVarID()] = IntervalValue::top();
        }
    }
    if (!retNode->getOutEdges().empty())
    {
        if (retNode->getOutEdges().size() == 1)
        {

        }
        else
        {
            return;
        }
    }
    // Approximate every store target in the callee's reachable blocks by TOP
    for (const SVFBasicBlock* bb: callNode->getCalledFunction()->getReachableBBs())
    {
        for (const ICFGNode* node: bb->getICFGNodeList())
        {
            for (const SVFStmt* stmt: node->getSVFStmts())
            {
                if (const StoreStmt* store = SVFUtil::dyn_cast<StoreStmt>(stmt))
                {
                    const SVFVar* rhsVar = store->getRHSVar();
                    u32_t lhs = store->getLHSVarID();
                    if (as.inVarToAddrsTable(lhs))
                    {
                        if (!rhsVar->isPointer() && !rhsVar->isConstDataOrAggDataButNotNullPtr())
                        {
                            const AbstractValue& addrs = as[lhs];
                            for (const auto& addr: addrs.getAddrs())
                            {
                                as.store(addr, IntervalValue::top());
                            }
                        }
                    }
                }
            }
        }
    }
}

// count the size of memory map
void AEStat::countStateSize()
{
    if (count == 0)
    {
        generalNumMap["ES_Var_AVG_Num"] = 0;
        generalNumMap["ES_Loc_AVG_Num"] = 0;
        generalNumMap["ES_Var_Addr_AVG_Num"] = 0;
        generalNumMap["ES_Loc_Addr_AVG_Num"] = 0;
    }
    ++count;
}

void AEStat::finializeStat()
{
    memUsage = getMemUsage();
    if (count > 0)
    {
        generalNumMap["ES_Var_AVG_Num"] /= count;
        generalNumMap["ES_Loc_AVG_Num"] /= count;
        generalNumMap["ES_Var_Addr_AVG_Num"] /= count;
        generalNumMap["ES_Loc_Addr_AVG_Num"] /= count;
    }
    generalNumMap["SVF_STMT_NUM"] = count;
    generalNumMap["ICFG_Node_Num"] = _ae->svfir->getICFG()->nodeNum;
    u32_t callSiteNum = 0;
    u32_t extCallSiteNum = 0;
    Set<const FunObjVar*> funs;
    for (const auto& it: *_ae->svfir->getICFG())
    {
        if (it.second->getFun())
        {
            funs.insert(it.second->getFun());
        }
        if (const CallICFGNode* callNode = dyn_cast<CallICFGNode>(it.second))
        {
            if (!isExtCall(callNode))
            {
                callSiteNum++;
            }
            else
            {
                extCallSiteNum++;
            }
        }
    }
    generalNumMap["Func_Num"] = funs.size();
    generalNumMap["EXT_CallSite_Num"] = extCallSiteNum;
    generalNumMap["NonEXT_CallSite_Num"] = callSiteNum;
    timeStatMap["Total_Time(sec)"] = (double)(endTime - startTime) / TIMEINTERVAL;
}

void AEStat::performStat()
{
    std::string fullName(_ae->moduleName);
    std::string name;
    std::string moduleName;
    if (fullName.find('/') == std::string::npos)
    {
        name = fullName;
        moduleName = name.substr(0, fullName.find('.'));
    }
    else
    {
        name = fullName.substr(fullName.find('/'), fullName.size());
        moduleName = name.substr(0, fullName.find('.'));
    }

    SVFUtil::outs() << "\n************************\n";
    SVFUtil::outs() << "################ (program : " << moduleName << ")###############\n";
    SVFUtil::outs().flags(std::ios::left);
    unsigned field_width = 30;
    for (NUMStatMap::iterator it = generalNumMap.begin(), eit = generalNumMap.end(); it != eit; ++it)
    {
        // format output with a fixed field width
        SVFUtil::outs() << std::setw(field_width) << it->first << it->second << "\n";
    }
    SVFUtil::outs() << "-------------------------------------------------------\n";
    for (TIMEStatMap::iterator it = timeStatMap.begin(), eit = timeStatMap.end(); it != eit; ++it)
    {
        // format output with a fixed field width
        SVFUtil::outs() << std::setw(field_width) << it->first << it->second << "\n";
    }
    SVFUtil::outs() << "Memory usage: " << memUsage << "\n";

    SVFUtil::outs() << "#######################################################" << std::endl;
    SVFUtil::outs().flush();
}

void AbstractInterpretation::collectCheckPoints()
{
    // traverse every ICFGNode
    Set<std::string> ae_checkpoint_names = {"svf_assert"};
    Set<std::string> buf_checkpoint_names = {"UNSAFE_BUFACCESS", "SAFE_BUFACCESS"};
    Set<std::string> nullptr_checkpoint_names = {"UNSAFE_LOAD", "SAFE_LOAD"};

    for (auto it = svfir->getICFG()->begin(); it != svfir->getICFG()->end(); ++it)
    {
        const ICFGNode* node = it->second;
        if (const CallICFGNode* call = SVFUtil::dyn_cast<CallICFGNode>(node))
        {
            if (const FunObjVar* fun = call->getCalledFunction())
            {
                if (ae_checkpoint_names.find(fun->getName()) !=
                        ae_checkpoint_names.end())
                {
                    checkpoints.insert(call);
                }
                if (Options::BufferOverflowCheck())
                {
                    if (buf_checkpoint_names.find(fun->getName()) !=
                            buf_checkpoint_names.end())
                    {
                        checkpoints.insert(call);
                    }
                }
                if (Options::NullDerefCheck())
                {
                    if (nullptr_checkpoint_names.find(fun->getName()) !=
                            nullptr_checkpoint_names.end())
                    {
                        checkpoints.insert(call);
                    }
                }
            }
        }
    }
}

void AbstractInterpretation::checkPointAllSet()
{
    if (checkpoints.empty())
    {
        return;
    }
    else
    {
        SVFUtil::errs() << SVFUtil::errMsg("At least one svf_assert has not been checked!!") << "\n";
        for (const CallICFGNode* call: checkpoints)
            SVFUtil::errs() << call->toString() + "\n";
        assert(false);
    }
}

void AbstractInterpretation::updateStateOnGep(const GepStmt* gep)
{
    AbstractState& as = getAbsStateFromTrace(gep->getICFGNode());
    u32_t rhs = gep->getRHSVarID();
    u32_t lhs = gep->getLHSVarID();
    IntervalValue offsetPair = as.getElementIndex(gep);
    AddressValue gepAddrs;
    APOffset lb = offsetPair.lb().getIntNumeral() < Options::MaxFieldLimit() ?
                  offsetPair.lb().getIntNumeral() : Options::MaxFieldLimit();
    APOffset ub = offsetPair.ub().getIntNumeral() < Options::MaxFieldLimit() ?
                  offsetPair.ub().getIntNumeral() : Options::MaxFieldLimit();
    for (APOffset i = lb; i <= ub; i++)
        gepAddrs.join_with(as.getGepObjAddrs(rhs, IntervalValue(i)));
    as[lhs] = gepAddrs;
}

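// Illustrative example (ours, not from the SVF sources): for "p = &arr[i]"
// with i -> [1,3], the transfer joins the abstract addresses of the field
// objects arr[1], arr[2], and arr[3] into p, with field indices clamped to
// Options::MaxFieldLimit().
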
void AbstractInterpretation::updateStateOnSelect(const SelectStmt* select)
{
    AbstractState& as = getAbsStateFromTrace(select->getICFGNode());
    u32_t res = select->getResID();
    u32_t tval = select->getTrueValue()->getId();
    u32_t fval = select->getFalseValue()->getId();
    u32_t cond = select->getCondition()->getId();
    if (as[cond].getInterval().is_numeral())
    {
        as[res] = as[cond].getInterval().is_zero() ? as[fval] : as[tval];
    }
    else
    {
        as[res] = as[tval];
        as[res].join_with(as[fval]);
    }
}

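// Illustrative example (ours, not from the SVF sources): for "r = c ? a : b"
// with a non-constant condition, r gets the join of both arms, e.g.
// a -> [1,2] and b -> [10,10] yield r -> [1,10]; with c known to be 0,
// r is exactly b.
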
void AbstractInterpretation::updateStateOnPhi(const PhiStmt* phi)
{
    const ICFGNode* icfgNode = phi->getICFGNode();
    AbstractState& as = getAbsStateFromTrace(icfgNode);
    u32_t res = phi->getResID();
    AbstractValue rhs;
    for (u32_t i = 0; i < phi->getOpVarNum(); i++)
    {
        NodeID curId = phi->getOpVarID(i);
        const ICFGNode* opICFGNode = phi->getOpICFGNode(i);
        if (hasAbsStateFromTrace(opICFGNode))
        {
            AbstractState& opAs = getAbsStateFromTrace(opICFGNode);
            const ICFGEdge* edge = icfg->getICFGEdge(opICFGNode, icfgNode, ICFGEdge::IntraCF);
            // if IntraEdge, check the condition, if it is feasible, join the value
            // if IntraEdge but not conditional edge, join the value
            // if not IntraEdge, join the value
            if (edge)
            {
                const IntraCFGEdge* intraEdge = SVFUtil::cast<IntraCFGEdge>(edge);
                if (intraEdge->getCondition())
                {
                    if (isBranchFeasible(intraEdge, opAs))
                        rhs.join_with(opAs[curId]);
                }
                else
                    rhs.join_with(opAs[curId]);
            }
            else
            {
                rhs.join_with(opAs[curId]);
            }
        }
    }
    as[res] = rhs;
}


void AbstractInterpretation::updateStateOnCall(const CallPE* callPE)
{
    AbstractState& as = getAbsStateFromTrace(callPE->getICFGNode());
    NodeID lhs = callPE->getLHSVarID();
    NodeID rhs = callPE->getRHSVarID();
    as[lhs] = as[rhs];
}

void AbstractInterpretation::updateStateOnRet(const RetPE* retPE)
{
    AbstractState& as = getAbsStateFromTrace(retPE->getICFGNode());
    NodeID lhs = retPE->getLHSVarID();
    NodeID rhs = retPE->getRHSVarID();
    as[lhs] = as[rhs];
}


void AbstractInterpretation::updateStateOnAddr(const AddrStmt* addr)
{
    AbstractState& as = getAbsStateFromTrace(addr->getICFGNode());
    as.initObjVar(SVFUtil::cast<ObjVar>(addr->getRHSVar()));
    if (addr->getRHSVar()->getType()->getKind() == SVFType::SVFIntegerTy)
        as[addr->getRHSVarID()].getInterval().meet_with(utils->getRangeLimitFromType(addr->getRHSVar()->getType()));
    as[addr->getLHSVarID()] = as[addr->getRHSVarID()];
}


void AbstractInterpretation::updateStateOnBinary(const BinaryOPStmt* binary)
{
    /// Handle the binary operators listed in BinaryOPStmt::OpCode;
    /// operands missing from the value table are conservatively set to TOP
    const ICFGNode* node = binary->getICFGNode();
    AbstractState& as = getAbsStateFromTrace(node);
    u32_t op0 = binary->getOpVarID(0);
    u32_t op1 = binary->getOpVarID(1);
    u32_t res = binary->getResID();
    if (!as.inVarToValTable(op0)) as[op0] = IntervalValue::top();
    if (!as.inVarToValTable(op1)) as[op1] = IntervalValue::top();
    IntervalValue &lhs = as[op0].getInterval(), &rhs = as[op1].getInterval();
    IntervalValue resVal;
    switch (binary->getOpcode())
    {
    case BinaryOPStmt::Add:
    case BinaryOPStmt::FAdd:
        resVal = (lhs + rhs);
        break;
    case BinaryOPStmt::Sub:
    case BinaryOPStmt::FSub:
        resVal = (lhs - rhs);
        break;
    case BinaryOPStmt::Mul:
    case BinaryOPStmt::FMul:
        resVal = (lhs * rhs);
        break;
    case BinaryOPStmt::SDiv:
    case BinaryOPStmt::FDiv:
    case BinaryOPStmt::UDiv:
        resVal = (lhs / rhs);
        break;
    case BinaryOPStmt::SRem:
    case BinaryOPStmt::FRem:
    case BinaryOPStmt::URem:
        resVal = (lhs % rhs);
        break;
    case BinaryOPStmt::Xor:
        resVal = (lhs ^ rhs);
        break;
    case BinaryOPStmt::And:
        resVal = (lhs & rhs);
        break;
    case BinaryOPStmt::Or:
        resVal = (lhs | rhs);
        break;
    case BinaryOPStmt::AShr:
        resVal = (lhs >> rhs);
        break;
    case BinaryOPStmt::Shl:
        resVal = (lhs << rhs);
        break;
    case BinaryOPStmt::LShr:
        resVal = (lhs >> rhs);
        break;
    default:
        assert(false && "undefined binary: ");
    }
    as[res] = resVal;
}

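// Illustrative example (ours, not from the SVF sources): interval arithmetic
// is computed bound-wise, e.g. [1,3] + [10,20] = [11,23] and
// [1,3] * [-2,2] = [-6,6]; unknown operands enter as [-inf,+inf] (TOP).
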
void AbstractInterpretation::updateStateOnCmp(const CmpStmt* cmp)
{
    AbstractState& as = getAbsStateFromTrace(cmp->getICFGNode());
    u32_t op0 = cmp->getOpVarID(0);
    u32_t op1 = cmp->getOpVarID(1);
    // if it is address
    if (as.inVarToAddrsTable(op0) && as.inVarToAddrsTable(op1))
    {
        IntervalValue resVal;
        AddressValue addrOp0 = as[op0].getAddrs();
        AddressValue addrOp1 = as[op1].getAddrs();
        u32_t res = cmp->getResID();
        if (addrOp0.equals(addrOp1))
        {
            resVal = IntervalValue(1, 1);
        }
        else if (addrOp0.hasIntersect(addrOp1))
        {
            resVal = IntervalValue(0, 1);
        }
        else
        {
            resVal = IntervalValue(0, 0);
        }
        as[res] = resVal;
    }
    // if op0 or op1 is nullptr, compare abstractValue instead of touching addr or interval
    else if (op0 == IRGraph::NullPtr || op1 == IRGraph::NullPtr)
    {
        u32_t res = cmp->getResID();
        IntervalValue resVal = (as[op0].equals(as[op1])) ? IntervalValue(1, 1) : IntervalValue(0, 0);
        as[res] = resVal;
    }
    else
    {
        if (!as.inVarToValTable(op0))
            as[op0] = IntervalValue::top();
        if (!as.inVarToValTable(op1))
            as[op1] = IntervalValue::top();
        u32_t res = cmp->getResID();
        if (as.inVarToValTable(op0) && as.inVarToValTable(op1))
        {
            IntervalValue resVal;
            if (as[op0].isInterval() && as[op1].isInterval())
            {
                IntervalValue &lhs = as[op0].getInterval(),
                               &rhs = as[op1].getInterval();
                auto predicate = cmp->getPredicate();
                switch (predicate)
                {
                case CmpStmt::ICMP_EQ:
                case CmpStmt::FCMP_OEQ:
                case CmpStmt::FCMP_UEQ:
                    resVal = (lhs == rhs);
                    break;
                case CmpStmt::ICMP_NE:
                case CmpStmt::FCMP_ONE:
                case CmpStmt::FCMP_UNE:
                    resVal = (lhs != rhs);
                    break;
                case CmpStmt::ICMP_UGT:
                case CmpStmt::ICMP_SGT:
                case CmpStmt::FCMP_OGT:
                case CmpStmt::FCMP_UGT:
                    resVal = (lhs > rhs);
                    break;
                case CmpStmt::ICMP_UGE:
                case CmpStmt::ICMP_SGE:
                case CmpStmt::FCMP_OGE:
                case CmpStmt::FCMP_UGE:
                    resVal = (lhs >= rhs);
                    break;
                case CmpStmt::ICMP_ULT:
                case CmpStmt::ICMP_SLT:
                case CmpStmt::FCMP_OLT:
                case CmpStmt::FCMP_ULT:
                    resVal = (lhs < rhs);
                    break;
                case CmpStmt::ICMP_ULE:
                case CmpStmt::ICMP_SLE:
                case CmpStmt::FCMP_OLE:
                case CmpStmt::FCMP_ULE:
                    resVal = (lhs <= rhs);
                    break;
                case CmpStmt::FCMP_FALSE:
                    resVal = IntervalValue(0, 0);
                    break;
                case CmpStmt::FCMP_TRUE:
                    resVal = IntervalValue(1, 1);
                    break;
                default:
                    assert(false && "undefined compare: ");
                }
                as[res] = resVal;
            }
            else if (as[op0].isAddr() && as[op1].isAddr())
            {
                AddressValue &lhs = as[op0].getAddrs(),
                              &rhs = as[op1].getAddrs();
                auto predicate = cmp->getPredicate();
                switch (predicate)
                {
                case CmpStmt::ICMP_EQ:
                case CmpStmt::FCMP_OEQ:
                case CmpStmt::FCMP_UEQ:
                {
                    if (lhs.hasIntersect(rhs))
                    {
                        resVal = IntervalValue(0, 1);
                    }
                    else if (lhs.empty() && rhs.empty())
                    {
                        resVal = IntervalValue(1, 1);
                    }
                    else
                    {
                        resVal = IntervalValue(0, 0);
                    }
                    break;
                }
                case CmpStmt::ICMP_NE:
                case CmpStmt::FCMP_ONE:
                case CmpStmt::FCMP_UNE:
                {
                    if (lhs.hasIntersect(rhs))
                    {
                        resVal = IntervalValue(0, 1);
                    }
                    else if (lhs.empty() && rhs.empty())
                    {
                        resVal = IntervalValue(0, 0);
                    }
                    else
                    {
                        resVal = IntervalValue(1, 1);
                    }
                    break;
                }
                case CmpStmt::ICMP_UGT:
                case CmpStmt::ICMP_SGT:
                case CmpStmt::FCMP_OGT:
                case CmpStmt::FCMP_UGT:
                {
                    if (lhs.size() == 1 && rhs.size() == 1)
                    {
                        resVal = IntervalValue(*lhs.begin() > *rhs.begin());
                    }
                    else
                    {
                        resVal = IntervalValue(0, 1);
                    }
                    break;
                }
                case CmpStmt::ICMP_UGE:
                case CmpStmt::ICMP_SGE:
                case CmpStmt::FCMP_OGE:
                case CmpStmt::FCMP_UGE:
                {
                    if (lhs.size() == 1 && rhs.size() == 1)
                    {
                        resVal = IntervalValue(*lhs.begin() >= *rhs.begin());
                    }
                    else
                    {
                        resVal = IntervalValue(0, 1);
                    }
                    break;
                }
                case CmpStmt::ICMP_ULT:
                case CmpStmt::ICMP_SLT:
                case CmpStmt::FCMP_OLT:
                case CmpStmt::FCMP_ULT:
                {
                    if (lhs.size() == 1 && rhs.size() == 1)
                    {
                        resVal = IntervalValue(*lhs.begin() < *rhs.begin());
                    }
                    else
                    {
                        resVal = IntervalValue(0, 1);
                    }
                    break;
                }
                case CmpStmt::ICMP_ULE:
                case CmpStmt::ICMP_SLE:
                case CmpStmt::FCMP_OLE:
                case CmpStmt::FCMP_ULE:
                {
                    if (lhs.size() == 1 && rhs.size() == 1)
                    {
                        resVal = IntervalValue(*lhs.begin() <= *rhs.begin());
                    }
                    else
                    {
                        resVal = IntervalValue(0, 1);
                    }
                    break;
                }
                case CmpStmt::FCMP_FALSE:
                    resVal = IntervalValue(0, 0);
                    break;
                case CmpStmt::FCMP_TRUE:
                    resVal = IntervalValue(1, 1);
                    break;
                default:
                    assert(false && "undefined compare: ");
                }
                as[res] = resVal;
            }
        }
    }
}

void AbstractInterpretation::updateStateOnLoad(const LoadStmt* load)
{
    AbstractState& as = getAbsStateFromTrace(load->getICFGNode());
    u32_t rhs = load->getRHSVarID();
    u32_t lhs = load->getLHSVarID();
    as[lhs] = as.loadValue(rhs);
}

void AbstractInterpretation::updateStateOnStore(const StoreStmt* store)
{
    AbstractState& as = getAbsStateFromTrace(store->getICFGNode());
    u32_t rhs = store->getRHSVarID();
    u32_t lhs = store->getLHSVarID();
    as.storeValue(lhs, as[rhs]);
}

void AbstractInterpretation::updateStateOnCopy(const CopyStmt* copy)
{
    auto getZExtValue = [](AbstractState& as, const SVFVar* var)
    {
        const SVFType* type = var->getType();
        if (SVFUtil::isa<SVFIntegerType>(type))
        {
            u32_t bits = type->getByteSize() * 8;
            if (as[var->getId()].getInterval().is_numeral())
            {
                if (bits == 8)
                {
                    int8_t signed_i8_value = as[var->getId()].getInterval().getIntNumeral();
                    u32_t unsigned_value = static_cast<uint8_t>(signed_i8_value);
                    return IntervalValue(unsigned_value, unsigned_value);
                }
                else if (bits == 16)
                {
                    s16_t signed_i16_value = as[var->getId()].getInterval().getIntNumeral();
                    u32_t unsigned_value = static_cast<u16_t>(signed_i16_value);
                    return IntervalValue(unsigned_value, unsigned_value);
                }
                else if (bits == 32)
                {
                    s32_t signed_i32_value = as[var->getId()].getInterval().getIntNumeral();
                    u32_t unsigned_value = static_cast<u32_t>(signed_i32_value);
                    return IntervalValue(unsigned_value, unsigned_value);
                }
                else if (bits == 64)
                {
                    s64_t signed_i64_value = as[var->getId()].getInterval().getIntNumeral();
                    // we only support i64 at most
                    return IntervalValue(signed_i64_value, signed_i64_value);
                }
                else
                    assert(false && "cannot support int type other than u8/16/32/64");
            }
            else
            {
                return IntervalValue::top(); // TODO: may have better solution
            }
        }
        return IntervalValue::top(); // TODO: may have better solution
    };

    auto getTruncValue = [&](const AbstractState& as, const SVFVar* var,
                             const SVFType* dstType)
    {
        const IntervalValue& itv = as[var->getId()].getInterval();
        if (itv.isBottom()) return itv;
        // get the value of ub and lb
        s64_t int_lb = itv.lb().getIntNumeral();
        s64_t int_ub = itv.ub().getIntNumeral();
        // get dst type
        u32_t dst_bits = dstType->getByteSize() * 8;
        if (dst_bits == 8)
        {
            // get the signed value of ub and lb
            int8_t s8_lb = static_cast<int8_t>(int_lb);
            int8_t s8_ub = static_cast<int8_t>(int_ub);
            if (s8_lb > s8_ub)
            {
                // return range of s8
                return IntervalValue((s64_t)INT8_MIN, (s64_t)INT8_MAX);
            }
            return IntervalValue(s8_lb, s8_ub);
        }
        else if (dst_bits == 16)
        {
            // get the signed value of ub and lb
            s16_t s16_lb = static_cast<s16_t>(int_lb);
            s16_t s16_ub = static_cast<s16_t>(int_ub);
            if (s16_lb > s16_ub)
            {
                // return range of s16
                return IntervalValue((s64_t)INT16_MIN, (s64_t)INT16_MAX);
            }
            return IntervalValue(s16_lb, s16_ub);
        }
        else if (dst_bits == 32)
        {
            // get the signed value of ub and lb
            s32_t s32_lb = static_cast<s32_t>(int_lb);
            s32_t s32_ub = static_cast<s32_t>(int_ub);
            if (s32_lb > s32_ub)
            {
                // return range of s32
                return IntervalValue((s64_t)INT32_MIN, (s64_t)INT32_MAX);
            }
            return IntervalValue(s32_lb, s32_ub);
        }
        else
        {
            assert(false && "cannot support dst int type other than u8/16/32");
            abort();
        }
    };

    AbstractState& as = getAbsStateFromTrace(copy->getICFGNode());
    u32_t lhs = copy->getLHSVarID();
    u32_t rhs = copy->getRHSVarID();

    if (copy->getCopyKind() == CopyStmt::COPYVAL)
    {
        as[lhs] = as[rhs];
    }
    else if (copy->getCopyKind() == CopyStmt::ZEXT)
    {
        as[lhs] = getZExtValue(as, copy->getRHSVar());
    }
    else if (copy->getCopyKind() == CopyStmt::SEXT)
    {
        as[lhs] = as[rhs].getInterval();
    }
    else if (copy->getCopyKind() == CopyStmt::FPTOSI)
    {
        as[lhs] = as[rhs].getInterval();
    }
    else if (copy->getCopyKind() == CopyStmt::FPTOUI)
    {
        as[lhs] = as[rhs].getInterval();
    }
    else if (copy->getCopyKind() == CopyStmt::SITOFP)
    {
        as[lhs] = as[rhs].getInterval();
    }
    else if (copy->getCopyKind() == CopyStmt::UITOFP)
    {
        as[lhs] = as[rhs].getInterval();
    }
    else if (copy->getCopyKind() == CopyStmt::TRUNC)
    {
        as[lhs] = getTruncValue(as, copy->getRHSVar(), copy->getLHSVar()->getType());
    }
    else if (copy->getCopyKind() == CopyStmt::FPTRUNC)
    {
        as[lhs] = as[rhs].getInterval();
    }
    else if (copy->getCopyKind() == CopyStmt::INTTOPTR)
    {
        //insert nullptr
    }
    else if (copy->getCopyKind() == CopyStmt::PTRTOINT)
    {
        as[lhs] = IntervalValue::top();
    }
    else if (copy->getCopyKind() == CopyStmt::BITCAST)
    {
        if (as[rhs].isAddr())
        {
            as[lhs] = as[rhs];
        }
        else
        {
            // do nothing
        }
    }
    else
        assert(false && "undefined copy kind");
}
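
// Illustrative example (ours, not from the SVF sources): ZEXT reinterprets
// the bit pattern as unsigned, so an i8 holding -1 (0xFF) zero-extends to
// 255, while TRUNC keeps [lb,ub] only when both bounds survive narrowing;
// truncating [100,300] to i8 yields the full signed i8 range because 300
// wraps around.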