Static Value-Flow Analysis — Doxygen-generated source listing
(page chrome from the documentation site follows; not part of the source)
Loading...
Searching...
No Matches
AbstractInterpretation.cpp — "Go to the documentation of this file" listing. NOTE: this scrape has dropped many original lines (those rendered as hyperlinks); line-number gaps below mark missing code.
1//===- AbstractExecution.cpp -- Abstract Execution---------------------------------//
2//
3// SVF: Static Value-Flow Analysis
4//
5// Copyright (C) <2013-> <Yulei Sui>
6//
7
8// This program is free software: you can redistribute it and/or modify
9// it under the terms of the GNU Affero General Public License as published by
10// the Free Software Foundation, either version 3 of the License, or
11// (at your option) any later version.
12
13// This program is distributed in the hope that it will be useful,
14// but WITHOUT ANY WARRANTY; without even the implied warranty of
15// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16// GNU Affero General Public License for more details.
17
18// You should have received a copy of the GNU Affero General Public License
19// along with this program. If not, see <http://www.gnu.org/licenses/>.
20//
21//===----------------------------------------------------------------------===//
22
23
24//
25// Created by Jiawei Wang on 2024/1/10.
26//
27
29#include "AE/Svfexe/AbsExtAPI.h"
30#include "SVFIR/SVFIR.h"
31#include "Util/Options.h"
32#include "Util/WorkList.h"
33#include "Graphs/CallGraph.h"
34#include "WPA/Andersen.h"
35#include <cmath>
36
37using namespace SVF;
38using namespace SVFUtil;
39using namespace z3;
40
41
43{
44 stat->startClk();
45 icfg = _icfg;
48
51
52 analyse();
54 stat->endClk();
56 if (Options::PStat())
58 for (auto& detector: detectors)
59 detector->reportBug();
60}
61
68{
69 delete stat;
70 for (auto it: funcToWTO)
71 delete it.second;
72}
73
84{
86 // Detect if the call graph has cycles by finding its strongly connected components (SCC)
88 callGraphScc->find();
89 CallGraph* callGraph = ander->getCallGraph();
90
91 // Iterate through the call graph
92 for (auto it = callGraph->begin(); it != callGraph->end(); it++)
93 {
94 // Check if the current function is part of a cycle
95 if (callGraphScc->isInCycle(it->second->getId()))
96 recursiveFuns.insert(it->second->getFunction()); // Mark the function as recursive
97
98 // In TOP mode, calculate the WTO
100 {
101 if (it->second->getFunction()->isDeclaration())
102 continue;
103 auto* wto = new ICFGWTO(icfg, icfg->getFunEntryICFGNode(it->second->getFunction()));
104 wto->init();
105 funcToWTO[it->second->getFunction()] = wto;
106 }
107 // In WIDEN_TOP or WIDEN_NARROW mode, calculate the IWTO
108 else if (Options::HandleRecur() == WIDEN_ONLY ||
110 {
111 const FunObjVar *fun = it->second->getFunction();
112 if (fun->isDeclaration())
113 continue;
114
115 NodeID repNodeId = callGraphScc->repNode(it->second->getId());
116 auto cgSCCNodes = callGraphScc->subNodes(repNodeId);
117
118 // Identify if this node is an SCC entry (nodes who have incoming edges
119 // from nodes outside the SCC). Also identify non-recursive callsites.
120 bool isEntry = false;
121 if (it->second->getInEdges().empty())
122 isEntry = true;
123 for (auto inEdge: it->second->getInEdges())
124 {
125 NodeID srcNodeId = inEdge->getSrcID();
126 if (!cgSCCNodes.test(srcNodeId))
127 {
128 isEntry = true;
129 const CallICFGNode *callSite = nullptr;
130 if (inEdge->isDirectCallEdge())
131 callSite = *(inEdge->getDirectCalls().begin());
132 else if (inEdge->isIndirectCallEdge())
133 callSite = *(inEdge->getIndirectCalls().begin());
134 else
135 assert(false && "CallGraphEdge must "
136 "be either direct or indirect!");
137
139 {callSite, inEdge->getDstNode()->getFunction()->getId()});
140 }
141 }
142
143 // Compute IWTO for the function partition entered from each partition entry
144 if (isEntry)
145 {
147 cgSCCNodes, callGraph);
148 iwto->init();
149 funcToWTO[it->second->getFunction()] = iwto;
150 }
151 }
152 else
153 assert(false && "Invalid recursion mode specified!");
154 }
155}
156
159{
160 initWTO();
161 // handle Global ICFGNode of SVFModule
165 if (const CallGraphNode* cgn = svfir->getCallGraph()->getCallGraphNode("main"))
166 {
167 const ICFGWTO* wto = funcToWTO[cgn->getFunction()];
168 handleWTOComponents(wto->getWTOComponents());
169 }
170}
171
174{
175 const ICFGNode* node = icfg->getGlobalICFGNode();
178 // Global Node, we just need to handle addr, load, store, copy and gep
179 for (const SVFStmt *stmt: node->getSVFStmts())
180 {
182 }
183}
184
189{
190 std::vector<AbstractState> workList;
192 for (auto& edge: icfgNode->getInEdges())
193 {
194 if (abstractTrace.find(edge->getSrcNode()) != abstractTrace.end())
195 {
196
197 if (const IntraCFGEdge *intraCfgEdge =
198 SVFUtil::dyn_cast<IntraCFGEdge>(edge))
199 {
200 AbstractState tmpEs = abstractTrace[edge->getSrcNode()];
201 if (intraCfgEdge->getCondition())
202 {
204 {
205 workList.push_back(tmpEs);
206 }
207 else
208 {
209 // do nothing
210 }
211 }
212 else
213 {
214 workList.push_back(tmpEs);
215 }
216 }
217 else if (const CallCFGEdge *callCfgEdge =
218 SVFUtil::dyn_cast<CallCFGEdge>(edge))
219 {
220
221 // context insensitive implementation
222 workList.push_back(
223 abstractTrace[callCfgEdge->getSrcNode()]);
224 }
225 else if (const RetCFGEdge *retCfgEdge =
226 SVFUtil::dyn_cast<RetCFGEdge>(edge))
227 {
228 switch (Options::HandleRecur())
229 {
230 case TOP:
231 {
232 workList.push_back(abstractTrace[retCfgEdge->getSrcNode()]);
233 break;
234 }
235 case WIDEN_ONLY:
236 case WIDEN_NARROW:
237 {
238 const RetICFGNode* returnSite = SVFUtil::dyn_cast<RetICFGNode>(icfgNode);
239 const CallICFGNode* callSite = returnSite->getCallICFGNode();
241 workList.push_back(abstractTrace[retCfgEdge->getSrcNode()]);
242 }
243 }
244 }
245 else
246 assert(false && "Unhandled ICFGEdge type encountered!");
247 }
248 }
249 if (workList.size() == 0)
250 {
251 return false;
252 }
253 else
254 {
255 while (!workList.empty())
256 {
257 preAs.joinWith(workList.back());
258 workList.pop_back();
259 }
260 // Has ES on the in edges - Feasible block
261 // update post as
262 abstractTrace[icfgNode] = preAs;
263 return true;
264 }
265}
266
267
270{
272 // get cmp stmt's op0, op1, and predicate
273 NodeID op0 = cmpStmt->getOpVarID(0);
274 NodeID op1 = cmpStmt->getOpVarID(1);
275 NodeID res_id = cmpStmt->getResID();
276 s32_t predicate = cmpStmt->getPredicate();
277 // if op0 or op1 is nullptr, no need to change value, just copy the state
279 {
280 as = new_es;
281 return true;
282 }
283 // if op0 or op1 is undefined, return;
284 // skip address compare
285 if (new_es.inVarToAddrsTable(op0) || new_es.inVarToAddrsTable(op1))
286 {
287 as = new_es;
288 return true;
289 }
290 const LoadStmt *load_op0 = nullptr;
291 const LoadStmt *load_op1 = nullptr;
292 // get '%1 = load i32 s', and load inst may not exist
294 if (!loadVar0->getInEdges().empty())
295 {
296 SVFStmt *loadVar0InStmt = *loadVar0->getInEdges().begin();
297 if (const LoadStmt *loadStmt = SVFUtil::dyn_cast<LoadStmt>(loadVar0InStmt))
298 {
300 }
301 else if (const CopyStmt *copyStmt = SVFUtil::dyn_cast<CopyStmt>(loadVar0InStmt))
302 {
303 loadVar0 = svfir->getGNode(copyStmt->getRHSVarID());
304 if (!loadVar0->getInEdges().empty())
305 {
306 SVFStmt *loadVar0InStmt2 = *loadVar0->getInEdges().begin();
307 if (const LoadStmt *loadStmt = SVFUtil::dyn_cast<LoadStmt>(loadVar0InStmt2))
308 {
310 }
311 }
312 }
313 }
314
316 if (!loadVar1->getInEdges().empty())
317 {
318 SVFStmt *loadVar1InStmt = *loadVar1->getInEdges().begin();
319 if (const LoadStmt *loadStmt = SVFUtil::dyn_cast<LoadStmt>(loadVar1InStmt))
320 {
322 }
323 else if (const CopyStmt *copyStmt = SVFUtil::dyn_cast<CopyStmt>(loadVar1InStmt))
324 {
325 loadVar1 = svfir->getGNode(copyStmt->getRHSVarID());
326 if (!loadVar1->getInEdges().empty())
327 {
328 SVFStmt *loadVar1InStmt2 = *loadVar1->getInEdges().begin();
329 if (const LoadStmt *loadStmt = SVFUtil::dyn_cast<LoadStmt>(loadVar1InStmt2))
330 {
332 }
333 }
334 }
335 }
336 // for const X const, we may get concrete resVal instantly
337 // for var X const, we may get [0,1] if the intersection of var and const is not empty set
338 IntervalValue resVal = new_es[res_id].getInterval();
340 // If Var X const generates bottom value, it means this branch path is not feasible.
341 if (resVal.isBottom())
342 {
343 return false;
344 }
345
346 bool b0 = new_es[op0].getInterval().is_numeral();
347 bool b1 = new_es[op1].getInterval().is_numeral();
348
349 // if const X var, we should reverse op0 and op1.
350 if (b0 && !b1)
351 {
352 std::swap(op0, op1);
353 std::swap(load_op0, load_op1);
354 predicate = _switch_lhsrhs_predicate[predicate];
355 }
356 else
357 {
358 // if var X var, we cannot preset the branch condition to infer the intervals of var0,var1
359 if (!b0 && !b1)
360 {
361 as = new_es;
362 return true;
363 }
364 // if const X const, we can instantly get the resVal
365 else if (b0 && b1)
366 {
367 as = new_es;
368 return true;
369 }
370 }
371 // if cmp is 'var X const == false', we should reverse predicate 'var X' const == true'
372 // X' is reverse predicate of X
373 if (succ == 0)
374 {
375 predicate = _reverse_predicate[predicate];
376 }
377 else {}
378 // change interval range according to the compare predicate
379 AddressValue addrs;
380 if(load_op0 && new_es.inVarToAddrsTable(load_op0->getRHSVarID()))
381 addrs = new_es[load_op0->getRHSVarID()].getAddrs();
382
383 IntervalValue &lhs = new_es[op0].getInterval(), &rhs = new_es[op1].getInterval();
384 switch (predicate)
385 {
389 {
390 // Var == Const, so [var.lb, var.ub].meet_with(const)
392 // if lhs is register value, we should also change its mem obj
393 for (const auto &addr: addrs)
394 {
395 NodeID objId = new_es.getIDFromAddr(addr);
396 if (new_es.inAddrToValTable(objId))
397 {
398 new_es.load(addr).meet_with(rhs);
399 }
400 }
401 break;
402 }
406 // Compliment set
407 break;
412 // Var > Const, so [var.lb, var.ub].meet_with([Const+1, +INF])
413 lhs.meet_with(IntervalValue(rhs.lb() + 1, IntervalValue::plus_infinity()));
414 // if lhs is register value, we should also change its mem obj
415 for (const auto &addr: addrs)
416 {
417 NodeID objId = new_es.getIDFromAddr(addr);
418 if (new_es.inAddrToValTable(objId))
419 {
420 new_es.load(addr).meet_with(
422 }
423 }
424 break;
429 {
430 // Var >= Const, so [var.lb, var.ub].meet_with([Const, +INF])
432 // if lhs is register value, we should also change its mem obj
433 for (const auto &addr: addrs)
434 {
435 NodeID objId = new_es.getIDFromAddr(addr);
436 if (new_es.inAddrToValTable(objId))
437 {
438 new_es.load(addr).meet_with(
440 }
441 }
442 break;
443 }
448 {
449 // Var < Const, so [var.lb, var.ub].meet_with([-INF, const.ub-1])
450 lhs.meet_with(IntervalValue(IntervalValue::minus_infinity(), rhs.ub() - 1));
451 // if lhs is register value, we should also change its mem obj
452 for (const auto &addr: addrs)
453 {
454 NodeID objId = new_es.getIDFromAddr(addr);
455 if (new_es.inAddrToValTable(objId))
456 {
457 new_es.load(addr).meet_with(
459 }
460 }
461 break;
462 }
467 {
468 // Var <= Const, so [var.lb, var.ub].meet_with([-INF, const.ub])
470 // if lhs is register value, we should also change its mem obj
471 for (const auto &addr: addrs)
472 {
473 NodeID objId = new_es.getIDFromAddr(addr);
474 if (new_es.inAddrToValTable(objId))
475 {
476 new_es.load(addr).meet_with(
478 }
479 }
480 break;
481 }
483 break;
485 break;
486 default:
487 assert(false && "implement this part");
488 abort();
489 }
490 as = new_es;
491 return true;
492}
493
496{
498 IntervalValue& switch_cond = new_es[var->getId()].getInterval();
499 s64_t value = succ;
501 for (SVFStmt *cmpVarInStmt: var->getInEdges())
502 {
504 }
505 switch_cond.meet_with(IntervalValue(value, value));
506 if (switch_cond.isBottom())
507 {
508 return false;
509 }
510 while(!workList.empty())
511 {
512 const SVFStmt* stmt = workList.pop();
513 if (SVFUtil::isa<CopyStmt>(stmt))
514 {
515 IntervalValue& copy_cond = new_es[var->getId()].getInterval();
516 copy_cond.meet_with(IntervalValue(value, value));
517 }
518 else if (const LoadStmt* load = SVFUtil::dyn_cast<LoadStmt>(stmt))
519 {
520 if (new_es.inVarToAddrsTable(load->getRHSVarID()))
521 {
522 AddressValue &addrs = new_es[load->getRHSVarID()].getAddrs();
523 for (const auto &addr: addrs)
524 {
525 NodeID objId = new_es.getIDFromAddr(addr);
526 if (new_es.inAddrToValTable(objId))
527 {
529 }
530 }
531 }
532 }
533 }
534 as = new_es;
535 return true;
536}
537
540{
541 const SVFVar *cmpVar = intraEdge->getCondition();
542 if (cmpVar->getInEdges().empty())
543 {
545 intraEdge->getSuccessorCondValue(), as);
546 }
547 else
548 {
549 assert(!cmpVar->getInEdges().empty() &&
550 "no in edges?");
551 SVFStmt *cmpVarInStmt = *cmpVar->getInEdges().begin();
552 if (const CmpStmt *cmpStmt = SVFUtil::dyn_cast<CmpStmt>(cmpVarInStmt))
553 {
555 intraEdge->getSuccessorCondValue(), as);
556 }
557 else
558 {
560 cmpVar, intraEdge->getSuccessorCondValue(), as);
561 }
562 }
563 return true;
564}
567{
568 const ICFGNode* node = icfgSingletonWto->getICFGNode();
569 stat->getBlockTrace()++;
570
571 std::deque<const ICFGNode*> worklist;
572
574 // handle SVF Stmt
575 for (const SVFStmt *stmt: node->getSVFStmts())
576 {
578 }
579 // inlining the callee by calling handleFunc for the callee function
580 if (const CallICFGNode* callnode = SVFUtil::dyn_cast<CallICFGNode>(node))
581 {
583 }
584 for (auto& detector: detectors)
585 detector->detect(getAbsStateFromTrace(node), node);
587}
588
592void AbstractInterpretation::handleWTOComponents(const std::list<const ICFGWTOComp*>& wtoComps)
593{
594 for (const ICFGWTOComp* wtoNode : wtoComps)
595 {
597 }
598}
599
601{
602 if (const ICFGSingletonWTO* node = SVFUtil::dyn_cast<ICFGSingletonWTO>(wtoNode))
603 {
604 if (mergeStatesFromPredecessors(node->getICFGNode()))
605 handleSingletonWTO(node);
606 }
607 // Handle WTO cycles
608 else if (const ICFGCycleWTO* cycle = SVFUtil::dyn_cast<ICFGCycleWTO>(wtoNode))
609 {
610 if (mergeStatesFromPredecessors(cycle->head()->getICFGNode()))
612 }
613 // Assert false for unknown WTO types
614 else
615 assert(false && "unknown WTO type!");
616}
617
619{
620 if (const CallICFGNode* callNode = SVFUtil::dyn_cast<CallICFGNode>(node))
621 {
622 if (isExtCall(callNode))
623 {
625 }
627 {
629 }
630 else if (isDirectCall(callNode))
631 {
633 }
634 else if (isIndirectCall(callNode))
635 {
637 }
638 else
639 assert(false && "implement this part");
640 }
641 else
642 assert (false && "it is not call node");
643}
644
646{
647 return SVFUtil::isExtCall(callNode->getCalledFunction());
648}
649
651{
652 callSiteStack.push_back(callNode);
654 for (auto& detector : detectors)
655 {
656 detector->handleStubFunctions(callNode);
657 }
658 callSiteStack.pop_back();
659}
660
662{
663 return recursiveFuns.find(fun) != recursiveFuns.end();
664}
665
667{
668 const FunObjVar *callfun = callNode->getCalledFunction();
669 if (!callfun)
670 return false;
671 else
672 return isRecursiveFun(callfun);
673}
674
676{
679 const RetICFGNode *retNode = callNode->getRetICFGNode();
680 if (retNode->getSVFStmts().size() > 0)
681 {
682 if (const RetPE *retPE = SVFUtil::dyn_cast<RetPE>(*retNode->getSVFStmts().begin()))
683 {
684 if (!retPE->getLHSVar()->isPointer() &&
685 !retPE->getLHSVar()->isConstDataOrAggDataButNotNullPtr())
686 {
687 as[retPE->getLHSVarID()] = IntervalValue::top();
688 }
689 }
690 }
692}
693
700
702{
703 const FunObjVar *callfun =callNode->getCalledFunction();
704 if (!callfun)
705 return false;
706 else
707 return !callfun->isDeclaration();
708}
710{
712
714
715 const FunObjVar *calleeFun =callNode->getCalledFunction();
717 {
719 return;
720 }
721
722 callSiteStack.push_back(callNode);
723
724 const ICFGWTO* wto = funcToWTO[calleeFun];
725 handleWTOComponents(wto->getWTOComponents());
726
727 callSiteStack.pop_back();
728 // handle Ret node
729 const RetICFGNode *retNode = callNode->getRetICFGNode();
730 // resume ES to callnode
732}
733
739
741{
745 if (!as.inVarToAddrsTable(call_id))
746 {
747 return;
748 }
751 SVFVar *func_var = svfir->getGNode(as.getIDFromAddr(addr));
752
753 if(const FunObjVar* funObjVar = SVFUtil::dyn_cast<FunObjVar>(func_var))
754 {
756 {
757 if (isRecursiveCallSite(callNode, funObjVar))
758 return;
759 }
760
761 const FunObjVar* callfun = funObjVar->getFunction();
762 callSiteStack.push_back(callNode);
764
765 const ICFGWTO* wto = funcToWTO[callfun];
766 handleWTOComponents(wto->getWTOComponents());
767 callSiteStack.pop_back();
768 // handle Ret node
769 const RetICFGNode* retNode = callNode->getRetICFGNode();
771 }
772}
773
776{
777 const ICFGNode* cycle_head = cycle->head()->getICFGNode();
778 // Flag to indicate if we are in the increasing phase
779 bool increasing = true;
780 // Infinite loop until a fixpoint is reached,
781 for (u32_t cur_iter = 0;; cur_iter++)
782 {
783 // Start widening or narrowing if cur_iter >= widen threshold (widen delay)
785 {
786 // Widen or narrow after processing cycle head node
788 handleWTOComponent(cycle->head());
790 if (increasing)
791 {
792 // Widening, use different modes for nodes within recursions
793 if (isRecursiveFun(cycle->head()->getICFGNode()->getFun()))
794 {
795 // For nodes in recursions, widen to top in WIDEN_TOP mode
797 {
799 }
800 // Perform normal widening in WIDEN_NARROW mode
802 {
804 }
805 // In TOP mode, skipRecursiveCall will handle recursions,
806 // thus should not reach this branch
807 else
808 {
809 assert(false && "Recursion mode TOP should not reach here!");
810 }
811 }
812 // For nodes outside recursions, perform normal widening
813 else
814 {
816 }
817
819 {
820 increasing = false;
821 continue;
822 }
823 }
824 else
825 {
826 // Narrowing, use different modes for nodes within recursions
827 if (isRecursiveFun(cycle->head()->getICFGNode()->getFun()))
828 {
829 // For nodes in recursions, skip narrowing in WIDEN_TOP mode
831 {
832 break;
833 }
834 // Perform normal narrowing in WIDEN_NARROW mode
836 {
837 // Widening's fixpoint reached in the widening phase, switch to narrowing
840 {
841 // Narrowing's fixpoint reached in the narrowing phase, exit loop
842 break;
843 }
844 }
845 // In TOP mode, skipRecursiveCall will handle recursions,
846 // thus should not reach this branch
847 else
848 {
849 assert(false && "Recursion mode TOP should not reach here");
850 }
851 }
852 // For nodes outside recursions, perform normal narrowing
853 else
854 {
855 // Widening's fixpoint reached in the widening phase, switch to narrowing
858 {
859 // Narrowing's fixpoint reached in the narrowing phase, exit loop
860 break;
861 }
862 }
863 }
864 }
865 else
866 {
867 // Handle the cycle head
868 handleWTOComponent(cycle->head());
869 }
870 // Handle the cycle body
871 handleWTOComponents(cycle->getWTOComponents());
872 }
873}
874
876{
877 if (const AddrStmt *addr = SVFUtil::dyn_cast<AddrStmt>(stmt))
878 {
880 }
881 else if (const BinaryOPStmt *binary = SVFUtil::dyn_cast<BinaryOPStmt>(stmt))
882 {
884 }
885 else if (const CmpStmt *cmp = SVFUtil::dyn_cast<CmpStmt>(stmt))
886 {
888 }
889 else if (SVFUtil::isa<UnaryOPStmt>(stmt))
890 {
891 }
892 else if (SVFUtil::isa<BranchStmt>(stmt))
893 {
894 // branch stmt is handled in hasBranchES
895 }
896 else if (const LoadStmt *load = SVFUtil::dyn_cast<LoadStmt>(stmt))
897 {
898 updateStateOnLoad(load);
899 }
900 else if (const StoreStmt *store = SVFUtil::dyn_cast<StoreStmt>(stmt))
901 {
902 updateStateOnStore(store);
903 }
904 else if (const CopyStmt *copy = SVFUtil::dyn_cast<CopyStmt>(stmt))
905 {
907 }
908 else if (const GepStmt *gep = SVFUtil::dyn_cast<GepStmt>(stmt))
909 {
911 }
912 else if (const SelectStmt *select = SVFUtil::dyn_cast<SelectStmt>(stmt))
913 {
915 }
916 else if (const PhiStmt *phi = SVFUtil::dyn_cast<PhiStmt>(stmt))
917 {
919 }
920 else if (const CallPE *callPE = SVFUtil::dyn_cast<CallPE>(stmt))
921 {
922 // To handle Call Edge
924 }
925 else if (const RetPE *retPE = SVFUtil::dyn_cast<RetPE>(stmt))
926 {
927 updateStateOnRet(retPE);
928 }
929 else
930 assert(false && "implement this part");
931 // NullPtr is index 0, it should not be changed
932 assert(!getAbsStateFromTrace(stmt->getICFGNode())[IRGraph::NullPtr].isInterval() &&
933 !getAbsStateFromTrace(stmt->getICFGNode())[IRGraph::NullPtr].isAddr());
934}
935
937{
939 const RetICFGNode *retNode = callNode->getRetICFGNode();
940 if (retNode->getSVFStmts().size() > 0)
941 {
942 if (const RetPE *retPE = SVFUtil::dyn_cast<RetPE>(*retNode->getSVFStmts().begin()))
943 {
945 if (!retPE->getLHSVar()->isPointer() && !retPE->getLHSVar()->isConstDataOrAggDataButNotNullPtr())
946 as[retPE->getLHSVarID()] = IntervalValue::top();
947 }
948 }
949 if (!retNode->getOutEdges().empty())
950 {
951 if (retNode->getOutEdges().size() == 1)
952 {
953
954 }
955 else
956 {
957 return;
958 }
959 }
962 for (const SVFBasicBlock * bb: callNode->getCalledFunction()->getReachableBBs())
963 {
964 for (const ICFGNode* node: bb->getICFGNodeList())
965 {
966 for (const SVFStmt *stmt: node->getSVFStmts())
967 {
968 if (const StoreStmt *store = SVFUtil::dyn_cast<StoreStmt>(stmt))
969 {
970 const SVFVar *rhsVar = store->getRHSVar();
971 u32_t lhs = store->getLHSVarID();
972 if (as.inVarToAddrsTable(lhs))
973 {
974 if (!rhsVar->isPointer() && !rhsVar->isConstDataOrAggDataButNotNullPtr())
975 {
976 const AbstractValue &addrs = as[lhs];
977 for (const auto &addr: addrs.getAddrs())
978 {
979 as.store(addr, IntervalValue::top());
980 }
981 }
982 }
983 }
984 }
985 }
986 }
987}
988
989// count the size of memory map
991{
992 if (count == 0)
993 {
994 generalNumMap["ES_Var_AVG_Num"] = 0;
995 generalNumMap["ES_Loc_AVG_Num"] = 0;
996 generalNumMap["ES_Var_Addr_AVG_Num"] = 0;
997 generalNumMap["ES_Loc_Addr_AVG_Num"] = 0;
998 }
999 ++count;
1000}
1001
1003{
1005 if (count > 0)
1006 {
1007 generalNumMap["ES_Var_AVG_Num"] /= count;
1008 generalNumMap["ES_Loc_AVG_Num"] /= count;
1009 generalNumMap["ES_Var_Addr_AVG_Num"] /= count;
1010 generalNumMap["ES_Loc_Addr_AVG_Num"] /= count;
1011 }
1012 generalNumMap["SVF_STMT_NUM"] = count;
1013 generalNumMap["ICFG_Node_Num"] = _ae->svfir->getICFG()->nodeNum;
1014 u32_t callSiteNum = 0;
1017 for (const auto &it: *_ae->svfir->getICFG())
1018 {
1019 if (it.second->getFun())
1020 {
1021 funs.insert(it.second->getFun());
1022 }
1023 if (const CallICFGNode *callNode = dyn_cast<CallICFGNode>(it.second))
1024 {
1025 if (!isExtCall(callNode))
1026 {
1027 callSiteNum++;
1028 }
1029 else
1030 {
1032 }
1033 }
1034 }
1035 generalNumMap["Func_Num"] = funs.size();
1036 generalNumMap["EXT_CallSite_Num"] = extCallSiteNum;
1037 generalNumMap["NonEXT_CallSite_Num"] = callSiteNum;
1038 timeStatMap["Total_Time(sec)"] = (double)(endTime - startTime) / TIMEINTERVAL;
1039
1040}
1041
1043{
1044 std::string fullName(_ae->moduleName);
1045 std::string name;
1046 std::string moduleName;
1047 if (fullName.find('/') == std::string::npos)
1048 {
1049 std::string name = fullName;
1050 moduleName = name.substr(0, fullName.find('.'));
1051 }
1052 else
1053 {
1054 std::string name = fullName.substr(fullName.find('/'), fullName.size());
1055 moduleName = name.substr(0, fullName.find('.'));
1056 }
1057
1058 SVFUtil::outs() << "\n************************\n";
1059 SVFUtil::outs() << "################ (program : " << moduleName << ")###############\n";
1060 SVFUtil::outs().flags(std::ios::left);
1061 unsigned field_width = 30;
1062 for (NUMStatMap::iterator it = generalNumMap.begin(), eit = generalNumMap.end(); it != eit; ++it)
1063 {
1064 // format out put with width 20 space
1065 std::cout << std::setw(field_width) << it->first << it->second << "\n";
1066 }
1067 SVFUtil::outs() << "-------------------------------------------------------\n";
1068 for (TIMEStatMap::iterator it = timeStatMap.begin(), eit = timeStatMap.end(); it != eit; ++it)
1069 {
1070 // format out put with width 20 space
1071 SVFUtil::outs() << std::setw(field_width) << it->first << it->second << "\n";
1072 }
1073 SVFUtil::outs() << "Memory usage: " << memUsage << "\n";
1074
1075 SVFUtil::outs() << "#######################################################" << std::endl;
1076 SVFUtil::outs().flush();
1077}
1078
1080{
1081 // traverse every ICFGNode
1082 Set<std::string> ae_checkpoint_names = {"svf_assert"};
1083 Set<std::string> buf_checkpoint_names = {"UNSAFE_BUFACCESS", "SAFE_BUFACCESS"};
1084 Set<std::string> nullptr_checkpoint_names = {"UNSAFE_LOAD", "SAFE_LOAD"};
1085
1086 for (auto it = svfir->getICFG()->begin(); it != svfir->getICFG()->end(); ++it)
1087 {
1088 const ICFGNode* node = it->second;
1089 if (const CallICFGNode *call = SVFUtil::dyn_cast<CallICFGNode>(node))
1090 {
1091 if (const FunObjVar *fun = call->getCalledFunction())
1092 {
1093 if (ae_checkpoint_names.find(fun->getName()) !=
1094 ae_checkpoint_names.end())
1095 {
1096 checkpoints.insert(call);
1097 }
1099 {
1100 if (buf_checkpoint_names.find(fun->getName()) !=
1102 {
1103 checkpoints.insert(call);
1104 }
1105 }
1107 {
1108 if (nullptr_checkpoint_names.find(fun->getName()) !=
1110 {
1111 checkpoints.insert(call);
1112 }
1113 }
1114 }
1115 }
1116 }
1117}
1118
1120{
1121 if (checkpoints.size() == 0)
1122 {
1123 return;
1124 }
1125 else
1126 {
1127 SVFUtil::errs() << SVFUtil::errMsg("At least one svf_assert has not been checked!!") << "\n";
1128 for (const CallICFGNode* call: checkpoints)
1129 SVFUtil::errs() << call->toString() + "\n";
1130 assert(false);
1131 }
1132
1133}
1134
1136{
1137 AbstractState& as = getAbsStateFromTrace(gep->getICFGNode());
1138 u32_t rhs = gep->getRHSVarID();
1139 u32_t lhs = gep->getLHSVarID();
1140 IntervalValue offsetPair = as.getElementIndex(gep);
1142 APOffset lb = offsetPair.lb().getIntNumeral() < Options::MaxFieldLimit()?
1143 offsetPair.lb().getIntNumeral(): Options::MaxFieldLimit();
1144 APOffset ub = offsetPair.ub().getIntNumeral() < Options::MaxFieldLimit()?
1145 offsetPair.ub().getIntNumeral(): Options::MaxFieldLimit();
1146 for (APOffset i = lb; i <= ub; i++)
1147 gepAddrs.join_with(as.getGepObjAddrs(rhs,IntervalValue(i)));
1148 as[lhs] = gepAddrs;
1149}
1150
1152{
1153 AbstractState& as = getAbsStateFromTrace(select->getICFGNode());
1154 u32_t res = select->getResID();
1155 u32_t tval = select->getTrueValue()->getId();
1156 u32_t fval = select->getFalseValue()->getId();
1157 u32_t cond = select->getCondition()->getId();
1158 if (as[cond].getInterval().is_numeral())
1159 {
1160 as[res] = as[cond].getInterval().is_zero() ? as[fval] : as[tval];
1161 }
1162 else
1163 {
1164 as[res] = as[tval];
1165 as[res].join_with(as[fval]);
1166 }
1167}
1168
1170{
1171 const ICFGNode* icfgNode = phi->getICFGNode();
1173 u32_t res = phi->getResID();
1175 for (u32_t i = 0; i < phi->getOpVarNum(); i++)
1176 {
1177 NodeID curId = phi->getOpVarID(i);
1178 const ICFGNode* opICFGNode = phi->getOpICFGNode(i);
1180 {
1184 // if IntraEdge, check the condition, if it is feasible, join the value
1185 // if IntraEdge but not conditional edge, join the value
1186 // if not IntraEdge, join the value
1187 if (edge)
1188 {
1189 const IntraCFGEdge* intraEdge = SVFUtil::cast<IntraCFGEdge>(edge);
1190 if (intraEdge->getCondition())
1191 {
1193 rhs.join_with(opAs[curId]);
1194 }
1195 else
1196 rhs.join_with(opAs[curId]);
1197 }
1198 else
1199 {
1200 rhs.join_with(opAs[curId]);
1201 }
1202 }
1203 }
1204 as[res] = rhs;
1205}
1206
1207
1209{
1210 AbstractState& as = getAbsStateFromTrace(callPE->getICFGNode());
1211 NodeID lhs = callPE->getLHSVarID();
1212 NodeID rhs = callPE->getRHSVarID();
1213 as[lhs] = as[rhs];
1214}
1215
1217{
1219 NodeID lhs = retPE->getLHSVarID();
1220 NodeID rhs = retPE->getRHSVarID();
1221 as[lhs] = as[rhs];
1222}
1223
1224
1226{
1227 AbstractState& as = getAbsStateFromTrace(addr->getICFGNode());
1228 as.initObjVar(SVFUtil::cast<ObjVar>(addr->getRHSVar()));
1229 if (addr->getRHSVar()->getType()->getKind() == SVFType::SVFIntegerTy)
1230 as[addr->getRHSVarID()].getInterval().meet_with(utils->getRangeLimitFromType(addr->getRHSVar()->getType()));
1231 as[addr->getLHSVarID()] = as[addr->getRHSVarID()];
1232}
1233
1234
1236{
1240 const ICFGNode* node = binary->getICFGNode();
1242 u32_t op0 = binary->getOpVarID(0);
1243 u32_t op1 = binary->getOpVarID(1);
1244 u32_t res = binary->getResID();
1245 if (!as.inVarToValTable(op0)) as[op0] = IntervalValue::top();
1246 if (!as.inVarToValTable(op1)) as[op1] = IntervalValue::top();
1247 IntervalValue &lhs = as[op0].getInterval(), &rhs = as[op1].getInterval();
1249 switch (binary->getOpcode())
1250 {
1251 case BinaryOPStmt::Add:
1252 case BinaryOPStmt::FAdd:
1253 resVal = (lhs + rhs);
1254 break;
1255 case BinaryOPStmt::Sub:
1256 case BinaryOPStmt::FSub:
1257 resVal = (lhs - rhs);
1258 break;
1259 case BinaryOPStmt::Mul:
1260 case BinaryOPStmt::FMul:
1261 resVal = (lhs * rhs);
1262 break;
1263 case BinaryOPStmt::SDiv:
1264 case BinaryOPStmt::FDiv:
1265 case BinaryOPStmt::UDiv:
1266 resVal = (lhs / rhs);
1267 break;
1268 case BinaryOPStmt::SRem:
1269 case BinaryOPStmt::FRem:
1270 case BinaryOPStmt::URem:
1271 resVal = (lhs % rhs);
1272 break;
1273 case BinaryOPStmt::Xor:
1274 resVal = (lhs ^ rhs);
1275 break;
1276 case BinaryOPStmt::And:
1277 resVal = (lhs & rhs);
1278 break;
1279 case BinaryOPStmt::Or:
1280 resVal = (lhs | rhs);
1281 break;
1282 case BinaryOPStmt::AShr:
1283 resVal = (lhs >> rhs);
1284 break;
1285 case BinaryOPStmt::Shl:
1286 resVal = (lhs << rhs);
1287 break;
1288 case BinaryOPStmt::LShr:
1289 resVal = (lhs >> rhs);
1290 break;
1291 default:
1292 assert(false && "undefined binary: ");
1293 }
1294 as[res] = resVal;
1295}
1296
1298{
1299 AbstractState& as = getAbsStateFromTrace(cmp->getICFGNode());
1300 u32_t op0 = cmp->getOpVarID(0);
1301 u32_t op1 = cmp->getOpVarID(1);
1302 // if it is address
1303 if (as.inVarToAddrsTable(op0) && as.inVarToAddrsTable(op1))
1304 {
1306 AddressValue addrOp0 = as[op0].getAddrs();
1307 AddressValue addrOp1 = as[op1].getAddrs();
1308 u32_t res = cmp->getResID();
1309 if (addrOp0.equals(addrOp1))
1310 {
1311 resVal = IntervalValue(1, 1);
1312 }
1313 else if (addrOp0.hasIntersect(addrOp1))
1314 {
1315 resVal = IntervalValue(0, 1);
1316 }
1317 else
1318 {
1319 resVal = IntervalValue(0, 0);
1320 }
1321 as[res] = resVal;
1322 }
1323 // if op0 or op1 is nullptr, compare abstractValue instead of touching addr or interval
1324 else if (op0 == IRGraph::NullPtr || op1 == IRGraph::NullPtr)
1325 {
1326 u32_t res = cmp->getResID();
1327 IntervalValue resVal = (as[op0].equals(as[op1])) ? IntervalValue(1, 1) : IntervalValue(0, 0);
1328 as[res] = resVal;
1329 }
1330 else
1331 {
1332 if (!as.inVarToValTable(op0))
1334 if (!as.inVarToValTable(op1))
1336 u32_t res = cmp->getResID();
1337 if (as.inVarToValTable(op0) && as.inVarToValTable(op1))
1338 {
1340 if (as[op0].isInterval() && as[op1].isInterval())
1341 {
1342 IntervalValue &lhs = as[op0].getInterval(),
1343 &rhs = as[op1].getInterval();
1344 // AbstractValue
1345 auto predicate = cmp->getPredicate();
1346 switch (predicate)
1347 {
1348 case CmpStmt::ICMP_EQ:
1349 case CmpStmt::FCMP_OEQ:
1350 case CmpStmt::FCMP_UEQ:
1351 resVal = (lhs == rhs);
1352 // resVal = (lhs.getInterval() == rhs.getInterval());
1353 break;
1354 case CmpStmt::ICMP_NE:
1355 case CmpStmt::FCMP_ONE:
1356 case CmpStmt::FCMP_UNE:
1357 resVal = (lhs != rhs);
1358 break;
1359 case CmpStmt::ICMP_UGT:
1360 case CmpStmt::ICMP_SGT:
1361 case CmpStmt::FCMP_OGT:
1362 case CmpStmt::FCMP_UGT:
1363 resVal = (lhs > rhs);
1364 break;
1365 case CmpStmt::ICMP_UGE:
1366 case CmpStmt::ICMP_SGE:
1367 case CmpStmt::FCMP_OGE:
1368 case CmpStmt::FCMP_UGE:
1369 resVal = (lhs >= rhs);
1370 break;
1371 case CmpStmt::ICMP_ULT:
1372 case CmpStmt::ICMP_SLT:
1373 case CmpStmt::FCMP_OLT:
1374 case CmpStmt::FCMP_ULT:
1375 resVal = (lhs < rhs);
1376 break;
1377 case CmpStmt::ICMP_ULE:
1378 case CmpStmt::ICMP_SLE:
1379 case CmpStmt::FCMP_OLE:
1380 case CmpStmt::FCMP_ULE:
1381 resVal = (lhs <= rhs);
1382 break;
1384 resVal = IntervalValue(0, 0);
1385 break;
1386 case CmpStmt::FCMP_TRUE:
1387 resVal = IntervalValue(1, 1);
1388 break;
1389 default:
1390 assert(false && "undefined compare: ");
1391 }
1392 as[res] = resVal;
1393 }
1394 else if (as[op0].isAddr() && as[op1].isAddr())
1395 {
1396 AddressValue &lhs = as[op0].getAddrs(),
1397 &rhs = as[op1].getAddrs();
1398 auto predicate = cmp->getPredicate();
1399 switch (predicate)
1400 {
1401 case CmpStmt::ICMP_EQ:
1402 case CmpStmt::FCMP_OEQ:
1403 case CmpStmt::FCMP_UEQ:
1404 {
1405 if (lhs.hasIntersect(rhs))
1406 {
1407 resVal = IntervalValue(0, 1);
1408 }
1409 else if (lhs.empty() && rhs.empty())
1410 {
1411 resVal = IntervalValue(1, 1);
1412 }
1413 else
1414 {
1415 resVal = IntervalValue(0, 0);
1416 }
1417 break;
1418 }
1419 case CmpStmt::ICMP_NE:
1420 case CmpStmt::FCMP_ONE:
1421 case CmpStmt::FCMP_UNE:
1422 {
1423 if (lhs.hasIntersect(rhs))
1424 {
1425 resVal = IntervalValue(0, 1);
1426 }
1427 else if (lhs.empty() && rhs.empty())
1428 {
1429 resVal = IntervalValue(0, 0);
1430 }
1431 else
1432 {
1433 resVal = IntervalValue(1, 1);
1434 }
1435 break;
1436 }
1437 case CmpStmt::ICMP_UGT:
1438 case CmpStmt::ICMP_SGT:
1439 case CmpStmt::FCMP_OGT:
1440 case CmpStmt::FCMP_UGT:
1441 {
1442 if (lhs.size() == 1 && rhs.size() == 1)
1443 {
1444 resVal = IntervalValue(*lhs.begin() > *rhs.begin());
1445 }
1446 else
1447 {
1448 resVal = IntervalValue(0, 1);
1449 }
1450 break;
1451 }
1452 case CmpStmt::ICMP_UGE:
1453 case CmpStmt::ICMP_SGE:
1454 case CmpStmt::FCMP_OGE:
1455 case CmpStmt::FCMP_UGE:
1456 {
1457 if (lhs.size() == 1 && rhs.size() == 1)
1458 {
1459 resVal = IntervalValue(*lhs.begin() >= *rhs.begin());
1460 }
1461 else
1462 {
1463 resVal = IntervalValue(0, 1);
1464 }
1465 break;
1466 }
1467 case CmpStmt::ICMP_ULT:
1468 case CmpStmt::ICMP_SLT:
1469 case CmpStmt::FCMP_OLT:
1470 case CmpStmt::FCMP_ULT:
1471 {
1472 if (lhs.size() == 1 && rhs.size() == 1)
1473 {
1474 resVal = IntervalValue(*lhs.begin() < *rhs.begin());
1475 }
1476 else
1477 {
1478 resVal = IntervalValue(0, 1);
1479 }
1480 break;
1481 }
1482 case CmpStmt::ICMP_ULE:
1483 case CmpStmt::ICMP_SLE:
1484 case CmpStmt::FCMP_OLE:
1485 case CmpStmt::FCMP_ULE:
1486 {
1487 if (lhs.size() == 1 && rhs.size() == 1)
1488 {
1489 resVal = IntervalValue(*lhs.begin() <= *rhs.begin());
1490 }
1491 else
1492 {
1493 resVal = IntervalValue(0, 1);
1494 }
1495 break;
1496 }
1498 resVal = IntervalValue(0, 0);
1499 break;
1500 case CmpStmt::FCMP_TRUE:
1501 resVal = IntervalValue(1, 1);
1502 break;
1503 default:
1504 assert(false && "undefined compare: ");
1505 }
1506 as[res] = resVal;
1507 }
1508 }
1509 }
1510}
1511
1513{
1515 u32_t rhs = load->getRHSVarID();
1516 u32_t lhs = load->getLHSVarID();
1517 as[lhs] = as.loadValue(rhs);
1518}
1519
1521{
1523 u32_t rhs = store->getRHSVarID();
1524 u32_t lhs = store->getLHSVarID();
1525 as.storeValue(lhs, as[rhs]);
1526}
1527
1529{
1530 auto getZExtValue = [](AbstractState& as, const SVFVar* var)
1531 {
1532 const SVFType* type = var->getType();
1533 if (SVFUtil::isa<SVFIntegerType>(type))
1534 {
1535 u32_t bits = type->getByteSize() * 8;
1536 if (as[var->getId()].getInterval().is_numeral())
1537 {
1538 if (bits == 8)
1539 {
1540 int8_t signed_i8_value = as[var->getId()].getInterval().getIntNumeral();
1543 }
1544 else if (bits == 16)
1545 {
1546 s16_t signed_i16_value = as[var->getId()].getInterval().getIntNumeral();
1549 }
1550 else if (bits == 32)
1551 {
1552 s32_t signed_i32_value = as[var->getId()].getInterval().getIntNumeral();
1555 }
1556 else if (bits == 64)
1557 {
1558 s64_t signed_i64_value = as[var->getId()].getInterval().getIntNumeral();
1560 // we only support i64 at most
1561 }
1562 else
1563 assert(false && "cannot support int type other than u8/16/32/64");
1564 }
1565 else
1566 {
1567 return IntervalValue::top(); // TODO: may have better solution
1568 }
1569 }
1570 return IntervalValue::top(); // TODO: may have better solution
1571 };
1572
1573 auto getTruncValue = [&](const AbstractState& as, const SVFVar* var,
1574 const SVFType* dstType)
1575 {
1576 const IntervalValue& itv = as[var->getId()].getInterval();
1577 if(itv.isBottom()) return itv;
1578 // get the value of ub and lb
1580 s64_t int_ub = itv.ub().getIntNumeral();
1581 // get dst type
1582 u32_t dst_bits = dstType->getByteSize() * 8;
1583 if (dst_bits == 8)
1584 {
1585 // get the signed value of ub and lb
1586 int8_t s8_lb = static_cast<int8_t>(int_lb);
1587 int8_t s8_ub = static_cast<int8_t>(int_ub);
1588 if (s8_lb > s8_ub)
1589 {
1590 // return range of s8
1592 }
1593 return IntervalValue(s8_lb, s8_ub);
1594 }
1595 else if (dst_bits == 16)
1596 {
1597 // get the signed value of ub and lb
1598 s16_t s16_lb = static_cast<s16_t>(int_lb);
1599 s16_t s16_ub = static_cast<s16_t>(int_ub);
1600 if (s16_lb > s16_ub)
1601 {
1602 // return range of s16
1604 }
1605 return IntervalValue(s16_lb, s16_ub);
1606 }
1607 else if (dst_bits == 32)
1608 {
1609 // get the signed value of ub and lb
1610 s32_t s32_lb = static_cast<s32_t>(int_lb);
1611 s32_t s32_ub = static_cast<s32_t>(int_ub);
1612 if (s32_lb > s32_ub)
1613 {
1614 // return range of s32
1616 }
1617 return IntervalValue(s32_lb, s32_ub);
1618 }
1619 else
1620 {
1621 assert(false && "cannot support dst int type other than u8/16/32");
1622 abort();
1623 }
1624 };
1625
1626 AbstractState& as = getAbsStateFromTrace(copy->getICFGNode());
1627 u32_t lhs = copy->getLHSVarID();
1628 u32_t rhs = copy->getRHSVarID();
1629
1630 if (copy->getCopyKind() == CopyStmt::COPYVAL)
1631 {
1632 as[lhs] = as[rhs];
1633 }
1634 else if (copy->getCopyKind() == CopyStmt::ZEXT)
1635 {
1636 as[lhs] = getZExtValue(as, copy->getRHSVar());
1637 }
1638 else if (copy->getCopyKind() == CopyStmt::SEXT)
1639 {
1640 as[lhs] = as[rhs].getInterval();
1641 }
1642 else if (copy->getCopyKind() == CopyStmt::FPTOSI)
1643 {
1644 as[lhs] = as[rhs].getInterval();
1645 }
1646 else if (copy->getCopyKind() == CopyStmt::FPTOUI)
1647 {
1648 as[lhs] = as[rhs].getInterval();
1649 }
1650 else if (copy->getCopyKind() == CopyStmt::SITOFP)
1651 {
1652 as[lhs] = as[rhs].getInterval();
1653 }
1654 else if (copy->getCopyKind() == CopyStmt::UITOFP)
1655 {
1656 as[lhs] = as[rhs].getInterval();
1657 }
1658 else if (copy->getCopyKind() == CopyStmt::TRUNC)
1659 {
1660 as[lhs] = getTruncValue(as, copy->getRHSVar(), copy->getLHSVar()->getType());
1661 }
1662 else if (copy->getCopyKind() == CopyStmt::FPTRUNC)
1663 {
1664 as[lhs] = as[rhs].getInterval();
1665 }
1666 else if (copy->getCopyKind() == CopyStmt::INTTOPTR)
1667 {
1668 //insert nullptr
1669 }
1670 else if (copy->getCopyKind() == CopyStmt::PTRTOINT)
1671 {
1673 }
1674 else if (copy->getCopyKind() == CopyStmt::BITCAST)
1675 {
1676 if (as[rhs].isAddr())
1677 {
1678 as[lhs] = as[rhs];
1679 }
1680 else
1681 {
1682 // do nothing
1683 }
1684 }
1685 else
1686 assert(false && "undefined copy kind");
1687}
1688
1689
1690
#define TIMEINTERVAL
Definition SVFType.h:540
newitem type
Definition cJSON.cpp:2739
copy
Definition cJSON.cpp:414
const char *const name
Definition cJSON.h:264
void performStat() override
std::string getMemUsage()
AbstractInterpretation * _ae
Handles external API calls and manages abstract states.
Definition AbsExtAPI.h:44
void handleExtAPI(const CallICFGNode *call)
Handles an external API call.
IntervalValue getRangeLimitFromType(const SVFType *type)
Gets the range limit from a type.
void updateStateOnCall(const CallPE *callPE)
virtual bool isRecursiveCall(const CallICFGNode *callNode)
virtual void recursiveCallPass(const CallICFGNode *callNode)
void updateStateOnStore(const StoreStmt *store)
virtual bool isDirectCall(const CallICFGNode *callNode)
bool hasAbsStateFromTrace(const ICFGNode *node)
virtual void handleCycleWTO(const ICFGCycleWTO *cycle)
handle wto cycle (loop)
Set< const FunObjVar * > recursiveFuns
void updateStateOnGep(const GepStmt *gep)
virtual void extCallPass(const CallICFGNode *callNode)
virtual void handleGlobalNode()
Global ICFGNode is handled at the entry of the program,.
bool mergeStatesFromPredecessors(const ICFGNode *icfgNode)
virtual void directCallFunPass(const CallICFGNode *callNode)
void handleWTOComponent(const ICFGWTOComp *wtoComp)
virtual bool isExtCall(const CallICFGNode *callNode)
virtual bool isIndirectCall(const CallICFGNode *callNode)
void initWTO()
Compute IWTO for each function partition entry.
void updateStateOnPhi(const PhiStmt *phi)
bool isBranchFeasible(const IntraCFGEdge *intraEdge, AbstractState &as)
std::vector< std::unique_ptr< AEDetector > > detectors
AbstractState & getAbsStateFromTrace(const ICFGNode *node)
Retrieves the abstract state from the trace for a given ICFG node.
Set< const CallICFGNode * > checkpoints
bool isSwitchBranchFeasible(const SVFVar *var, s64_t succ, AbstractState &as)
void updateStateOnSelect(const SelectStmt *select)
virtual void handleSVFStatement(const SVFStmt *stmt)
virtual void indirectCallFunPass(const CallICFGNode *callNode)
SVFIR * svfir
protected data members, also used in subclasses
virtual void runOnModule(ICFG *icfg)
virtual bool isRecursiveCallSite(const CallICFGNode *callNode, const FunObjVar *)
virtual bool isRecursiveFun(const FunObjVar *fun)
void updateStateOnAddr(const AddrStmt *addr)
virtual ~AbstractInterpretation()
Destructor.
bool isCmpBranchFeasible(const CmpStmt *cmpStmt, s64_t succ, AbstractState &as)
virtual void handleCallSite(const ICFGNode *node)
void updateStateOnRet(const RetPE *retPE)
void handleWTOComponents(const std::list< const ICFGWTOComp * > &wtoComps)
Handle two types of WTO components (singleton and cycle)
void updateStateOnCopy(const CopyStmt *copy)
Set< std::pair< const CallICFGNode *, NodeID > > nonRecursiveCallSites
void updateStateOnLoad(const LoadStmt *load)
void updateStateOnBinary(const BinaryOPStmt *binary)
Map< const ICFGNode *, AbstractState > abstractTrace
std::vector< const CallICFGNode * > callSiteStack
virtual void handleSingletonWTO(const ICFGSingletonWTO *icfgSingletonWto)
handle instructions in svf basic blocks
Map< s32_t, s32_t > _switch_lhsrhs_predicate
void updateStateOnCmp(const CmpStmt *cmp)
virtual void SkipRecursiveCall(const CallICFGNode *callnode)
Map< const FunObjVar *, const ICFGWTO * > funcToWTO
AbstractState widening(const AbstractState &other)
domain widen with other, and return the widened domain
AddressValue & getAddrs()
bool meet_with(const AddressValue &other)
Return an intersected AddressValue.
AddrSet::const_iterator begin() const
static AndersenWaveDiff * createAndersenWaveDiff(SVFIR *_pag)
Create an singleton instance directly instead of invoking llvm pass manager.
Definition Andersen.h:407
NodeID getRHSVarID() const
NodeID getLHSVarID() const
s64_t getIntNumeral() const
const CallGraphNode * getCallGraphNode(const std::string &name) const
Get call graph node.
@ ICMP_SGT
signed greater than
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ ICMP_UGE
unsigned greater or equal
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ ICMP_ULE
unsigned less or equal
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ ICMP_NE
not equal
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
@ ICMP_ULT
unsigned less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ ICMP_SLT
signed less than
@ ICMP_UGT
unsigned greater than
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_SLE
signed less or equal
virtual const FunObjVar * getFunction() const
Get containing function, or null for globals/constants.
bool isDeclaration() const
iterator begin()
Iterators.
u32_t nodeNum
total num of edge
NodeType * getGNode(NodeID id) const
Get a node.
const GEdgeSetTy & getInEdges() const
virtual const std::string toString() const
Definition ICFG.cpp:60
const SVFStmtList & getSVFStmts() const
Definition ICFGNode.h:117
ICFGEdge * getICFGEdge(const ICFGNode *src, const ICFGNode *dst, ICFGEdge::ICFGEdgeK kind)
Get a SVFG edge according to src and dst.
Definition ICFG.cpp:311
FunEntryICFGNode * getFunEntryICFGNode(const FunObjVar *fun)
Add a function entry node.
Definition ICFG.cpp:242
GlobalICFGNode * getGlobalICFGNode() const
Definition ICFG.h:230
NodeID getBlkPtr() const
Definition IRGraph.h:255
void meet_with(const IntervalValue &other)
Return an intersected IntervalValue.
static BoundedInt minus_infinity()
Get minus infinity -inf.
static BoundedInt plus_infinity()
Get plus infinity +inf.
static IntervalValue top()
Create the IntervalValue [-inf, +inf].
const BoundedInt & lb() const
Return the lower bound.
static const OptionMap< u32_t > HandleRecur
recursion handling mode, Default: TOP
Definition Options.h:245
static const Option< u32_t > MaxFieldLimit
Maximum number of field derivations for an object.
Definition Options.h:35
static const Option< u32_t > WidenDelay
Definition Options.h:243
static const Option< bool > NullDerefCheck
nullptr dereference checker, Default: false
Definition Options.h:253
static const Option< bool > PStat
Definition Options.h:116
static const Option< bool > BufferOverflowCheck
buffer overflow checker, Default: false
Definition Options.h:251
CallGraph * getCallGraph() const
Return call graph.
SCCDetection< CallGraph * > CallGraphSCC
CallGraphSCC * getCallGraphSCC() const
Return call graph SCC.
const CallGraph * getCallGraph()
Get CG.
Definition SVFIR.h:180
ICFG * getICFG() const
Definition SVFIR.h:163
const CallSiteToFunPtrMap & getIndirectCallsites() const
Add/get indirect callsites.
Definition SVFIR.h:374
static SVFIR * getPAG(bool buildFromFile=false)
Singleton design here to make sure we only have one instance during any analysis.
Definition SVFIR.h:116
NUMStatMap generalNumMap
Definition SVFStat.h:76
virtual void endClk()
Definition SVFStat.h:63
std::string moduleName
Definition SVFStat.h:99
TIMEStatMap timeStatMap
Definition SVFStat.h:78
virtual void startClk()
Definition SVFStat.h:58
double endTime
Definition SVFStat.h:81
double startTime
Definition SVFStat.h:80
ICFGNode * getICFGNode() const
u32_t getByteSize() const
Definition SVFType.h:249
std::string errMsg(const std::string &msg)
Print error message by converting a string into red string output.
Definition SVFUtil.cpp:78
std::ostream & errs()
Overwrite llvm::errs()
Definition SVFUtil.h:58
bool isExtCall(const FunObjVar *fun)
Definition SVFUtil.cpp:437
std::ostream & outs()
Overwrite llvm::outs()
Definition SVFUtil.h:52
for isBitcode
Definition BasicTypes.h:68
u32_t NodeID
Definition GeneralType.h:56
s64_t APOffset
Definition GeneralType.h:60
signed short s16_t
Definition GeneralType.h:54
unsigned short u16_t
Definition GeneralType.h:53
llvm::IRBuilder IRBuilder
Definition BasicTypes.h:74
signed s32_t
Definition GeneralType.h:48
unsigned u32_t
Definition GeneralType.h:47
signed long long s64_t
Definition GeneralType.h:50