Static Value-Flow Analysis
Loading...
Searching...
No Matches
AbstractInterpretation.cpp
Go to the documentation of this file.
1//===- AbstractInterpretation.cpp -- Abstract Interpretation---------------------------------//
2//
3// SVF: Static Value-Flow Analysis
4//
5// Copyright (C) <2013-> <Yulei Sui>
6//
7
8// This program is free software: you can redistribute it and/or modify
9// it under the terms of the GNU Affero General Public License as published by
10// the Free Software Foundation, either version 3 of the License, or
11// (at your option) any later version.
12
13// This program is distributed in the hope that it will be useful,
14// but WITHOUT ANY WARRANTY; without even the implied warranty of
15// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16// GNU Affero General Public License for more details.
17
18// You should have received a copy of the GNU Affero General Public License
19// along with this program. If not, see <http://www.gnu.org/licenses/>.
20//
21//===----------------------------------------------------------------------===//
22
23
24//
25// Created by Jiawei Wang on 2024/1/10.
26//
27
29#include "AE/Svfexe/AbsExtAPI.h"
30#include "SVFIR/SVFIR.h"
31#include "Util/Options.h"
32#include "Util/WorkList.h"
33#include "Graphs/CallGraph.h"
34#include "WPA/Andersen.h"
35#include <cmath>
36
37using namespace SVF;
38using namespace SVFUtil;
39using namespace z3;
40
41
43{
44 stat->startClk();
45 icfg = _icfg;
48
51
52 analyse();
54 stat->endClk();
56 if (Options::PStat())
58 for (auto& detector: detectors)
59 detector->reportBug();
60}
61
68{
69 delete stat;
70 for (auto it: funcToWTO)
71 delete it.second;
72}
73
84{
86 // Detect if the call graph has cycles by finding its strongly connected components (SCC)
88 callGraphScc->find();
89 CallGraph* callGraph = ander->getCallGraph();
90
91 // Iterate through the call graph
92 for (auto it = callGraph->begin(); it != callGraph->end(); it++)
93 {
94 // Check if the current function is part of a cycle
95 if (callGraphScc->isInCycle(it->second->getId()))
96 recursiveFuns.insert(it->second->getFunction()); // Mark the function as recursive
97
98 // In TOP mode, calculate the WTO
100 {
101 if (it->second->getFunction()->isDeclaration())
102 continue;
103 auto* wto = new ICFGWTO(icfg, icfg->getFunEntryICFGNode(it->second->getFunction()));
104 wto->init();
105 funcToWTO[it->second->getFunction()] = wto;
106 }
107 // In WIDEN_TOP or WIDEN_NARROW mode, calculate the IWTO
108 else if (Options::HandleRecur() == WIDEN_ONLY ||
110 {
111 const FunObjVar *fun = it->second->getFunction();
112 if (fun->isDeclaration())
113 continue;
114
115 NodeID repNodeId = callGraphScc->repNode(it->second->getId());
116 auto cgSCCNodes = callGraphScc->subNodes(repNodeId);
117
118 // Identify if this node is an SCC entry (nodes who have incoming edges
119 // from nodes outside the SCC). Also identify non-recursive callsites.
120 bool isEntry = false;
121 if (it->second->getInEdges().empty())
122 isEntry = true;
123 for (auto inEdge: it->second->getInEdges())
124 {
125 NodeID srcNodeId = inEdge->getSrcID();
126 if (!cgSCCNodes.test(srcNodeId))
127 {
128 isEntry = true;
129 const CallICFGNode *callSite = nullptr;
130 if (inEdge->isDirectCallEdge())
131 callSite = *(inEdge->getDirectCalls().begin());
132 else if (inEdge->isIndirectCallEdge())
133 callSite = *(inEdge->getIndirectCalls().begin());
134 else
135 assert(false && "CallGraphEdge must "
136 "be either direct or indirect!");
137
139 {callSite, inEdge->getDstNode()->getFunction()->getId()});
140 }
141 }
142
143 // Compute IWTO for the function partition entered from each partition entry
144 if (isEntry)
145 {
147 cgSCCNodes, callGraph);
148 iwto->init();
149 funcToWTO[it->second->getFunction()] = iwto;
150 }
151 }
152 else
153 assert(false && "Invalid recursion mode specified!");
154 }
155}
156
159{
160 initWTO();
161 // handle Global ICFGNode of SVFModule
165 if (const CallGraphNode* cgn = svfir->getCallGraph()->getCallGraphNode("main"))
166 {
167 const ICFGWTO* wto = funcToWTO[cgn->getFunction()];
168 handleWTOComponents(wto->getWTOComponents());
169 }
170}
171
174{
175 const ICFGNode* node = icfg->getGlobalICFGNode();
178 // Global Node, we just need to handle addr, load, store, copy and gep
179 for (const SVFStmt *stmt: node->getSVFStmts())
180 {
182 }
183}
184
189{
190 std::vector<AbstractState> workList;
192 for (auto& edge: icfgNode->getInEdges())
193 {
194 if (abstractTrace.find(edge->getSrcNode()) != abstractTrace.end())
195 {
196
197 if (const IntraCFGEdge *intraCfgEdge =
198 SVFUtil::dyn_cast<IntraCFGEdge>(edge))
199 {
200 AbstractState tmpEs = abstractTrace[edge->getSrcNode()];
201 if (intraCfgEdge->getCondition())
202 {
204 {
205 workList.push_back(tmpEs);
206 }
207 else
208 {
209 // do nothing
210 }
211 }
212 else
213 {
214 workList.push_back(tmpEs);
215 }
216 }
217 else if (const CallCFGEdge *callCfgEdge =
218 SVFUtil::dyn_cast<CallCFGEdge>(edge))
219 {
220
221 // context insensitive implementation
222 workList.push_back(
223 abstractTrace[callCfgEdge->getSrcNode()]);
224 }
225 else if (const RetCFGEdge *retCfgEdge =
226 SVFUtil::dyn_cast<RetCFGEdge>(edge))
227 {
228 switch (Options::HandleRecur())
229 {
230 case TOP:
231 {
232 workList.push_back(abstractTrace[retCfgEdge->getSrcNode()]);
233 break;
234 }
235 case WIDEN_ONLY:
236 case WIDEN_NARROW:
237 {
238 const RetICFGNode* returnSite = SVFUtil::dyn_cast<RetICFGNode>(icfgNode);
239 const CallICFGNode* callSite = returnSite->getCallICFGNode();
241 workList.push_back(abstractTrace[retCfgEdge->getSrcNode()]);
242 }
243 }
244 }
245 else
246 assert(false && "Unhandled ICFGEdge type encountered!");
247 }
248 }
249 if (workList.size() == 0)
250 {
251 return false;
252 }
253 else
254 {
255 while (!workList.empty())
256 {
257 preAs.joinWith(workList.back());
258 workList.pop_back();
259 }
260 // Has ES on the in edges - Feasible block
261 // update post as
262 abstractTrace[icfgNode] = preAs;
263 return true;
264 }
265}
266
267
270{
272 // get cmp stmt's op0, op1, and predicate
273 NodeID op0 = cmpStmt->getOpVarID(0);
274 NodeID op1 = cmpStmt->getOpVarID(1);
275 NodeID res_id = cmpStmt->getResID();
276 s32_t predicate = cmpStmt->getPredicate();
277 // if op0 or op1 is nullptr, no need to change value, just copy the state
279 {
280 as = new_es;
281 return true;
282 }
283 // if op0 or op1 is undefined, return;
284 // skip address compare
285 if (new_es.inVarToAddrsTable(op0) || new_es.inVarToAddrsTable(op1))
286 {
287 as = new_es;
288 return true;
289 }
290 const LoadStmt *load_op0 = nullptr;
291 const LoadStmt *load_op1 = nullptr;
292 // get '%1 = load i32 s', and load inst may not exist
294 if (!loadVar0->getInEdges().empty())
295 {
296 SVFStmt *loadVar0InStmt = *loadVar0->getInEdges().begin();
297 if (const LoadStmt *loadStmt = SVFUtil::dyn_cast<LoadStmt>(loadVar0InStmt))
298 {
300 }
301 else if (const CopyStmt *copyStmt = SVFUtil::dyn_cast<CopyStmt>(loadVar0InStmt))
302 {
303 loadVar0 = svfir->getGNode(copyStmt->getRHSVarID());
304 if (!loadVar0->getInEdges().empty())
305 {
306 SVFStmt *loadVar0InStmt2 = *loadVar0->getInEdges().begin();
307 if (const LoadStmt *loadStmt = SVFUtil::dyn_cast<LoadStmt>(loadVar0InStmt2))
308 {
310 }
311 }
312 }
313 }
314
316 if (!loadVar1->getInEdges().empty())
317 {
318 SVFStmt *loadVar1InStmt = *loadVar1->getInEdges().begin();
319 if (const LoadStmt *loadStmt = SVFUtil::dyn_cast<LoadStmt>(loadVar1InStmt))
320 {
322 }
323 else if (const CopyStmt *copyStmt = SVFUtil::dyn_cast<CopyStmt>(loadVar1InStmt))
324 {
325 loadVar1 = svfir->getGNode(copyStmt->getRHSVarID());
326 if (!loadVar1->getInEdges().empty())
327 {
328 SVFStmt *loadVar1InStmt2 = *loadVar1->getInEdges().begin();
329 if (const LoadStmt *loadStmt = SVFUtil::dyn_cast<LoadStmt>(loadVar1InStmt2))
330 {
332 }
333 }
334 }
335 }
336 // for const X const, we may get concrete resVal instantly
337 // for var X const, we may get [0,1] if the intersection of var and const is not empty set
338 IntervalValue resVal = new_es[res_id].getInterval();
340 // If Var X const generates bottom value, it means this branch path is not feasible.
341 if (resVal.isBottom())
342 {
343 return false;
344 }
345
346 bool b0 = new_es[op0].getInterval().is_numeral();
347 bool b1 = new_es[op1].getInterval().is_numeral();
348
349 // if const X var, we should reverse op0 and op1.
350 if (b0 && !b1)
351 {
352 std::swap(op0, op1);
353 std::swap(load_op0, load_op1);
354 predicate = _switch_lhsrhs_predicate[predicate];
355 }
356 else
357 {
358 // if var X var, we cannot preset the branch condition to infer the intervals of var0,var1
359 if (!b0 && !b1)
360 {
361 as = new_es;
362 return true;
363 }
364 // if const X const, we can instantly get the resVal
365 else if (b0 && b1)
366 {
367 as = new_es;
368 return true;
369 }
370 }
371 // if cmp is 'var X const == false', we should reverse predicate 'var X' const == true'
372 // X' is reverse predicate of X
373 if (succ == 0)
374 {
375 predicate = _reverse_predicate[predicate];
376 }
377 else {}
378 // change interval range according to the compare predicate
379 AddressValue addrs;
380 if(load_op0 && new_es.inVarToAddrsTable(load_op0->getRHSVarID()))
381 addrs = new_es[load_op0->getRHSVarID()].getAddrs();
382
383 IntervalValue &lhs = new_es[op0].getInterval(), &rhs = new_es[op1].getInterval();
384 switch (predicate)
385 {
389 {
390 // Var == Const, so [var.lb, var.ub].meet_with(const)
392 // if lhs is register value, we should also change its mem obj
393 for (const auto &addr: addrs)
394 {
395 NodeID objId = new_es.getIDFromAddr(addr);
396 if (new_es.inAddrToValTable(objId))
397 {
398 new_es.load(addr).meet_with(rhs);
399 }
400 }
401 break;
402 }
406 // Compliment set
407 break;
412 // Var > Const, so [var.lb, var.ub].meet_with([Const+1, +INF])
413 lhs.meet_with(IntervalValue(rhs.lb() + 1, IntervalValue::plus_infinity()));
414 // if lhs is register value, we should also change its mem obj
415 for (const auto &addr: addrs)
416 {
417 NodeID objId = new_es.getIDFromAddr(addr);
418 if (new_es.inAddrToValTable(objId))
419 {
420 new_es.load(addr).meet_with(
422 }
423 }
424 break;
429 {
430 // Var >= Const, so [var.lb, var.ub].meet_with([Const, +INF])
432 // if lhs is register value, we should also change its mem obj
433 for (const auto &addr: addrs)
434 {
435 NodeID objId = new_es.getIDFromAddr(addr);
436 if (new_es.inAddrToValTable(objId))
437 {
438 new_es.load(addr).meet_with(
440 }
441 }
442 break;
443 }
448 {
449 // Var < Const, so [var.lb, var.ub].meet_with([-INF, const.ub-1])
450 lhs.meet_with(IntervalValue(IntervalValue::minus_infinity(), rhs.ub() - 1));
451 // if lhs is register value, we should also change its mem obj
452 for (const auto &addr: addrs)
453 {
454 NodeID objId = new_es.getIDFromAddr(addr);
455 if (new_es.inAddrToValTable(objId))
456 {
457 new_es.load(addr).meet_with(
459 }
460 }
461 break;
462 }
467 {
468 // Var <= Const, so [var.lb, var.ub].meet_with([-INF, const.ub])
470 // if lhs is register value, we should also change its mem obj
471 for (const auto &addr: addrs)
472 {
473 NodeID objId = new_es.getIDFromAddr(addr);
474 if (new_es.inAddrToValTable(objId))
475 {
476 new_es.load(addr).meet_with(
478 }
479 }
480 break;
481 }
483 break;
485 break;
486 default:
487 assert(false && "implement this part");
488 abort();
489 }
490 as = new_es;
491 return true;
492}
493
496{
498 IntervalValue& switch_cond = new_es[var->getId()].getInterval();
499 s64_t value = succ;
501 for (SVFStmt *cmpVarInStmt: var->getInEdges())
502 {
504 }
505 switch_cond.meet_with(IntervalValue(value, value));
506 if (switch_cond.isBottom())
507 {
508 return false;
509 }
510 while(!workList.empty())
511 {
512 const SVFStmt* stmt = workList.pop();
513 if (SVFUtil::isa<CopyStmt>(stmt))
514 {
515 IntervalValue& copy_cond = new_es[var->getId()].getInterval();
516 copy_cond.meet_with(IntervalValue(value, value));
517 }
518 else if (const LoadStmt* load = SVFUtil::dyn_cast<LoadStmt>(stmt))
519 {
520 if (new_es.inVarToAddrsTable(load->getRHSVarID()))
521 {
522 AddressValue &addrs = new_es[load->getRHSVarID()].getAddrs();
523 for (const auto &addr: addrs)
524 {
525 NodeID objId = new_es.getIDFromAddr(addr);
526 if (new_es.inAddrToValTable(objId))
527 {
529 }
530 }
531 }
532 }
533 }
534 as = new_es;
535 return true;
536}
537
540{
541 const SVFVar *cmpVar = intraEdge->getCondition();
542 if (cmpVar->getInEdges().empty())
543 {
545 intraEdge->getSuccessorCondValue(), as);
546 }
547 else
548 {
549 assert(!cmpVar->getInEdges().empty() &&
550 "no in edges?");
551 SVFStmt *cmpVarInStmt = *cmpVar->getInEdges().begin();
552 if (const CmpStmt *cmpStmt = SVFUtil::dyn_cast<CmpStmt>(cmpVarInStmt))
553 {
555 intraEdge->getSuccessorCondValue(), as);
556 }
557 else
558 {
560 cmpVar, intraEdge->getSuccessorCondValue(), as);
561 }
562 }
563 return true;
564}
567{
568 const ICFGNode* node = icfgSingletonWto->getICFGNode();
569 stat->getBlockTrace()++;
570
571 std::deque<const ICFGNode*> worklist;
572
573 const std::vector<const ICFGNode*>& worklist_vec = icfg->getSubNodes(node);
574 for (auto it = worklist_vec.begin(); it != worklist_vec.end(); ++it)
575 {
576 const ICFGNode* curNode = *it;
578 // handle SVF Stmt
579 for (const SVFStmt *stmt: curNode->getSVFStmts())
580 {
582 }
583 // inlining the callee by calling handleFunc for the callee function
584 if (const CallICFGNode* callnode = SVFUtil::dyn_cast<CallICFGNode>(curNode))
585 {
587 }
588 for (auto& detector: detectors)
589 detector->detect(getAbsStateFromTrace(node), node);
591 }
592}
593
597void AbstractInterpretation::handleWTOComponents(const std::list<const ICFGWTOComp*>& wtoComps)
598{
599 for (const ICFGWTOComp* wtoNode : wtoComps)
600 {
602 }
603}
604
606{
607 if (const ICFGSingletonWTO* node = SVFUtil::dyn_cast<ICFGSingletonWTO>(wtoNode))
608 {
609 if (mergeStatesFromPredecessors(node->getICFGNode()))
610 handleSingletonWTO(node);
611 }
612 // Handle WTO cycles
613 else if (const ICFGCycleWTO* cycle = SVFUtil::dyn_cast<ICFGCycleWTO>(wtoNode))
614 {
615 if (mergeStatesFromPredecessors(cycle->head()->getICFGNode()))
617 }
618 // Assert false for unknown WTO types
619 else
620 assert(false && "unknown WTO type!");
621}
622
624{
625 if (const CallICFGNode* callNode = SVFUtil::dyn_cast<CallICFGNode>(node))
626 {
627 if (isExtCall(callNode))
628 {
630 }
632 {
634 }
635 else if (isDirectCall(callNode))
636 {
638 }
639 else if (isIndirectCall(callNode))
640 {
642 }
643 else
644 assert(false && "implement this part");
645 }
646 else
647 assert (false && "it is not call node");
648}
649
651{
652 return SVFUtil::isExtCall(callNode->getCalledFunction());
653}
654
656{
657 callSiteStack.push_back(callNode);
659 for (auto& detector : detectors)
660 {
661 detector->handleStubFunctions(callNode);
662 }
663 callSiteStack.pop_back();
664}
665
667{
668 return recursiveFuns.find(fun) != recursiveFuns.end();
669}
670
672{
673 const FunObjVar *callfun = callNode->getCalledFunction();
674 if (!callfun)
675 return false;
676 else
677 return isRecursiveFun(callfun);
678}
679
681{
684 const RetICFGNode *retNode = callNode->getRetICFGNode();
685 if (retNode->getSVFStmts().size() > 0)
686 {
687 if (const RetPE *retPE = SVFUtil::dyn_cast<RetPE>(*retNode->getSVFStmts().begin()))
688 {
689 if (!retPE->getLHSVar()->isPointer() &&
690 !retPE->getLHSVar()->isConstDataOrAggDataButNotNullPtr())
691 {
692 as[retPE->getLHSVarID()] = IntervalValue::top();
693 }
694 }
695 }
697}
698
705
707{
708 const FunObjVar *callfun =callNode->getCalledFunction();
709 if (!callfun)
710 return false;
711 else
712 return !callfun->isDeclaration();
713}
715{
717
719
720 const FunObjVar *calleeFun =callNode->getCalledFunction();
722 {
724 return;
725 }
726
727 callSiteStack.push_back(callNode);
728
729 const ICFGWTO* wto = funcToWTO[calleeFun];
730 handleWTOComponents(wto->getWTOComponents());
731
732 callSiteStack.pop_back();
733 // handle Ret node
734 const RetICFGNode *retNode = callNode->getRetICFGNode();
735 // resume ES to callnode
737}
738
744
746{
750 if (!as.inVarToAddrsTable(call_id))
751 {
752 return;
753 }
756 SVFVar *func_var = svfir->getGNode(as.getIDFromAddr(addr));
757
758 if(const FunObjVar* funObjVar = SVFUtil::dyn_cast<FunObjVar>(func_var))
759 {
761 {
762 if (isRecursiveCallSite(callNode, funObjVar))
763 return;
764 }
765
766 const FunObjVar* callfun = funObjVar->getFunction();
767 callSiteStack.push_back(callNode);
769
770 const ICFGWTO* wto = funcToWTO[callfun];
771 handleWTOComponents(wto->getWTOComponents());
772 callSiteStack.pop_back();
773 // handle Ret node
774 const RetICFGNode* retNode = callNode->getRetICFGNode();
776 }
777}
778
781{
782 const ICFGNode* cycle_head = cycle->head()->getICFGNode();
783 // Flag to indicate if we are in the increasing phase
784 bool increasing = true;
785 // Infinite loop until a fixpoint is reached,
786 for (u32_t cur_iter = 0;; cur_iter++)
787 {
788 // Start widening or narrowing if cur_iter >= widen threshold (widen delay)
790 {
791 // Widen or narrow after processing cycle head node
793 handleWTOComponent(cycle->head());
795 if (increasing)
796 {
797 // Widening, use different modes for nodes within recursions
798 if (isRecursiveFun(cycle->head()->getICFGNode()->getFun()))
799 {
800 // For nodes in recursions, widen to top in WIDEN_TOP mode
802 {
804 }
805 // Perform normal widening in WIDEN_NARROW mode
807 {
809 }
810 // In TOP mode, skipRecursiveCall will handle recursions,
811 // thus should not reach this branch
812 else
813 {
814 assert(false && "Recursion mode TOP should not reach here!");
815 }
816 }
817 // For nodes outside recursions, perform normal widening
818 else
819 {
821 }
822
824 {
825 increasing = false;
826 continue;
827 }
828 }
829 else
830 {
831 // Narrowing, use different modes for nodes within recursions
832 if (isRecursiveFun(cycle->head()->getICFGNode()->getFun()))
833 {
834 // For nodes in recursions, skip narrowing in WIDEN_TOP mode
836 {
837 break;
838 }
839 // Perform normal narrowing in WIDEN_NARROW mode
841 {
842 // Widening's fixpoint reached in the widening phase, switch to narrowing
845 {
846 // Narrowing's fixpoint reached in the narrowing phase, exit loop
847 break;
848 }
849 }
850 // In TOP mode, skipRecursiveCall will handle recursions,
851 // thus should not reach this branch
852 else
853 {
854 assert(false && "Recursion mode TOP should not reach here");
855 }
856 }
857 // For nodes outside recursions, perform normal narrowing
858 else
859 {
860 // Widening's fixpoint reached in the widening phase, switch to narrowing
863 {
864 // Narrowing's fixpoint reached in the narrowing phase, exit loop
865 break;
866 }
867 }
868 }
869 }
870 else
871 {
872 // Handle the cycle head
873 handleWTOComponent(cycle->head());
874 }
875 // Handle the cycle body
876 handleWTOComponents(cycle->getWTOComponents());
877 }
878}
879
881{
882 if (const AddrStmt *addr = SVFUtil::dyn_cast<AddrStmt>(stmt))
883 {
885 }
886 else if (const BinaryOPStmt *binary = SVFUtil::dyn_cast<BinaryOPStmt>(stmt))
887 {
889 }
890 else if (const CmpStmt *cmp = SVFUtil::dyn_cast<CmpStmt>(stmt))
891 {
893 }
894 else if (SVFUtil::isa<UnaryOPStmt>(stmt))
895 {
896 }
897 else if (SVFUtil::isa<BranchStmt>(stmt))
898 {
899 // branch stmt is handled in hasBranchES
900 }
901 else if (const LoadStmt *load = SVFUtil::dyn_cast<LoadStmt>(stmt))
902 {
903 updateStateOnLoad(load);
904 }
905 else if (const StoreStmt *store = SVFUtil::dyn_cast<StoreStmt>(stmt))
906 {
907 updateStateOnStore(store);
908 }
909 else if (const CopyStmt *copy = SVFUtil::dyn_cast<CopyStmt>(stmt))
910 {
912 }
913 else if (const GepStmt *gep = SVFUtil::dyn_cast<GepStmt>(stmt))
914 {
916 }
917 else if (const SelectStmt *select = SVFUtil::dyn_cast<SelectStmt>(stmt))
918 {
920 }
921 else if (const PhiStmt *phi = SVFUtil::dyn_cast<PhiStmt>(stmt))
922 {
924 }
925 else if (const CallPE *callPE = SVFUtil::dyn_cast<CallPE>(stmt))
926 {
927 // To handle Call Edge
929 }
930 else if (const RetPE *retPE = SVFUtil::dyn_cast<RetPE>(stmt))
931 {
932 updateStateOnRet(retPE);
933 }
934 else
935 assert(false && "implement this part");
936 // NullPtr is index 0, it should not be changed
937 assert(!getAbsStateFromTrace(stmt->getICFGNode())[IRGraph::NullPtr].isInterval() &&
938 !getAbsStateFromTrace(stmt->getICFGNode())[IRGraph::NullPtr].isAddr());
939}
940
942{
944 const RetICFGNode *retNode = callNode->getRetICFGNode();
945 if (retNode->getSVFStmts().size() > 0)
946 {
947 if (const RetPE *retPE = SVFUtil::dyn_cast<RetPE>(*retNode->getSVFStmts().begin()))
948 {
950 if (!retPE->getLHSVar()->isPointer() && !retPE->getLHSVar()->isConstDataOrAggDataButNotNullPtr())
951 as[retPE->getLHSVarID()] = IntervalValue::top();
952 }
953 }
954 if (!retNode->getOutEdges().empty())
955 {
956 if (retNode->getOutEdges().size() == 1)
957 {
958
959 }
960 else
961 {
962 return;
963 }
964 }
967 for (const SVFBasicBlock * bb: callNode->getCalledFunction()->getReachableBBs())
968 {
969 for (const ICFGNode* node: bb->getICFGNodeList())
970 {
971 for (const SVFStmt *stmt: node->getSVFStmts())
972 {
973 if (const StoreStmt *store = SVFUtil::dyn_cast<StoreStmt>(stmt))
974 {
975 const SVFVar *rhsVar = store->getRHSVar();
976 u32_t lhs = store->getLHSVarID();
977 if (as.inVarToAddrsTable(lhs))
978 {
979 if (!rhsVar->isPointer() && !rhsVar->isConstDataOrAggDataButNotNullPtr())
980 {
981 const AbstractValue &addrs = as[lhs];
982 for (const auto &addr: addrs.getAddrs())
983 {
984 as.store(addr, IntervalValue::top());
985 }
986 }
987 }
988 }
989 }
990 }
991 }
992}
993
994// count the size of memory map
996{
997 if (count == 0)
998 {
999 generalNumMap["ES_Var_AVG_Num"] = 0;
1000 generalNumMap["ES_Loc_AVG_Num"] = 0;
1001 generalNumMap["ES_Var_Addr_AVG_Num"] = 0;
1002 generalNumMap["ES_Loc_Addr_AVG_Num"] = 0;
1003 }
1004 ++count;
1005}
1006
1008{
1010 if (count > 0)
1011 {
1012 generalNumMap["ES_Var_AVG_Num"] /= count;
1013 generalNumMap["ES_Loc_AVG_Num"] /= count;
1014 generalNumMap["ES_Var_Addr_AVG_Num"] /= count;
1015 generalNumMap["ES_Loc_Addr_AVG_Num"] /= count;
1016 }
1017 generalNumMap["SVF_STMT_NUM"] = count;
1018 generalNumMap["ICFG_Node_Num"] = _ae->svfir->getICFG()->nodeNum;
1019 u32_t callSiteNum = 0;
1022 for (const auto &it: *_ae->svfir->getICFG())
1023 {
1024 if (it.second->getFun())
1025 {
1026 funs.insert(it.second->getFun());
1027 }
1028 if (const CallICFGNode *callNode = dyn_cast<CallICFGNode>(it.second))
1029 {
1030 if (!isExtCall(callNode))
1031 {
1032 callSiteNum++;
1033 }
1034 else
1035 {
1037 }
1038 }
1039 }
1040 generalNumMap["Func_Num"] = funs.size();
1041 generalNumMap["EXT_CallSite_Num"] = extCallSiteNum;
1042 generalNumMap["NonEXT_CallSite_Num"] = callSiteNum;
1043 timeStatMap["Total_Time(sec)"] = (double)(endTime - startTime) / TIMEINTERVAL;
1044
1045}
1046
1048{
1049 std::string fullName(_ae->moduleName);
1050 std::string name;
1051 std::string moduleName;
1052 if (fullName.find('/') == std::string::npos)
1053 {
1054 std::string name = fullName;
1055 moduleName = name.substr(0, fullName.find('.'));
1056 }
1057 else
1058 {
1059 std::string name = fullName.substr(fullName.find('/'), fullName.size());
1060 moduleName = name.substr(0, fullName.find('.'));
1061 }
1062
1063 SVFUtil::outs() << "\n************************\n";
1064 SVFUtil::outs() << "################ (program : " << moduleName << ")###############\n";
1065 SVFUtil::outs().flags(std::ios::left);
1066 unsigned field_width = 30;
1067 for (NUMStatMap::iterator it = generalNumMap.begin(), eit = generalNumMap.end(); it != eit; ++it)
1068 {
1069 // format out put with width 20 space
1070 std::cout << std::setw(field_width) << it->first << it->second << "\n";
1071 }
1072 SVFUtil::outs() << "-------------------------------------------------------\n";
1073 for (TIMEStatMap::iterator it = timeStatMap.begin(), eit = timeStatMap.end(); it != eit; ++it)
1074 {
1075 // format out put with width 20 space
1076 SVFUtil::outs() << std::setw(field_width) << it->first << it->second << "\n";
1077 }
1078 SVFUtil::outs() << "Memory usage: " << memUsage << "\n";
1079
1080 SVFUtil::outs() << "#######################################################" << std::endl;
1081 SVFUtil::outs().flush();
1082}
1083
1085{
1086 // traverse every ICFGNode
1087 Set<std::string> ae_checkpoint_names = {"svf_assert"};
1088 Set<std::string> buf_checkpoint_names = {"UNSAFE_BUFACCESS", "SAFE_BUFACCESS"};
1089 Set<std::string> nullptr_checkpoint_names = {"UNSAFE_LOAD", "SAFE_LOAD"};
1090
1091 for (auto it = svfir->getICFG()->begin(); it != svfir->getICFG()->end(); ++it)
1092 {
1093 const ICFGNode* node = it->second;
1094 if (const CallICFGNode *call = SVFUtil::dyn_cast<CallICFGNode>(node))
1095 {
1096 if (const FunObjVar *fun = call->getCalledFunction())
1097 {
1098 if (ae_checkpoint_names.find(fun->getName()) !=
1099 ae_checkpoint_names.end())
1100 {
1101 checkpoints.insert(call);
1102 }
1104 {
1105 if (buf_checkpoint_names.find(fun->getName()) !=
1107 {
1108 checkpoints.insert(call);
1109 }
1110 }
1112 {
1113 if (nullptr_checkpoint_names.find(fun->getName()) !=
1115 {
1116 checkpoints.insert(call);
1117 }
1118 }
1119 }
1120 }
1121 }
1122}
1123
1125{
1126 if (checkpoints.size() == 0)
1127 {
1128 return;
1129 }
1130 else
1131 {
1132 SVFUtil::errs() << SVFUtil::errMsg("At least one svf_assert has not been checked!!") << "\n";
1133 for (const CallICFGNode* call: checkpoints)
1134 SVFUtil::errs() << call->toString() + "\n";
1135 assert(false);
1136 }
1137
1138}
1139
1141{
1142 AbstractState& as = getAbsStateFromTrace(gep->getICFGNode());
1143 u32_t rhs = gep->getRHSVarID();
1144 u32_t lhs = gep->getLHSVarID();
1145 IntervalValue offsetPair = as.getElementIndex(gep);
1147 APOffset lb = offsetPair.lb().getIntNumeral() < Options::MaxFieldLimit()?
1148 offsetPair.lb().getIntNumeral(): Options::MaxFieldLimit();
1149 APOffset ub = offsetPair.ub().getIntNumeral() < Options::MaxFieldLimit()?
1150 offsetPair.ub().getIntNumeral(): Options::MaxFieldLimit();
1151 for (APOffset i = lb; i <= ub; i++)
1152 gepAddrs.join_with(as.getGepObjAddrs(rhs,IntervalValue(i)));
1153 as[lhs] = gepAddrs;
1154}
1155
1157{
1158 AbstractState& as = getAbsStateFromTrace(select->getICFGNode());
1159 u32_t res = select->getResID();
1160 u32_t tval = select->getTrueValue()->getId();
1161 u32_t fval = select->getFalseValue()->getId();
1162 u32_t cond = select->getCondition()->getId();
1163 if (as[cond].getInterval().is_numeral())
1164 {
1165 as[res] = as[cond].getInterval().is_zero() ? as[fval] : as[tval];
1166 }
1167 else
1168 {
1169 as[res] = as[tval];
1170 as[res].join_with(as[fval]);
1171 }
1172}
1173
1175{
1176 const ICFGNode* icfgNode = phi->getICFGNode();
1178 u32_t res = phi->getResID();
1180 for (u32_t i = 0; i < phi->getOpVarNum(); i++)
1181 {
1182 NodeID curId = phi->getOpVarID(i);
1183 const ICFGNode* opICFGNode = phi->getOpICFGNode(i);
1185 {
1189 // if IntraEdge, check the condition, if it is feasible, join the value
1190 // if IntraEdge but not conditional edge, join the value
1191 // if not IntraEdge, join the value
1192 if (edge)
1193 {
1194 const IntraCFGEdge* intraEdge = SVFUtil::cast<IntraCFGEdge>(edge);
1195 if (intraEdge->getCondition())
1196 {
1198 rhs.join_with(opAs[curId]);
1199 }
1200 else
1201 rhs.join_with(opAs[curId]);
1202 }
1203 else
1204 {
1205 rhs.join_with(opAs[curId]);
1206 }
1207 }
1208 }
1209 as[res] = rhs;
1210}
1211
1212
1214{
1215 AbstractState& as = getAbsStateFromTrace(callPE->getICFGNode());
1216 NodeID lhs = callPE->getLHSVarID();
1217 NodeID rhs = callPE->getRHSVarID();
1218 as[lhs] = as[rhs];
1219}
1220
1222{
1224 NodeID lhs = retPE->getLHSVarID();
1225 NodeID rhs = retPE->getRHSVarID();
1226 as[lhs] = as[rhs];
1227}
1228
1229
1231{
1232 AbstractState& as = getAbsStateFromTrace(addr->getICFGNode());
1233 as.initObjVar(SVFUtil::cast<ObjVar>(addr->getRHSVar()));
1234 if (addr->getRHSVar()->getType()->getKind() == SVFType::SVFIntegerTy)
1235 as[addr->getRHSVarID()].getInterval().meet_with(utils->getRangeLimitFromType(addr->getRHSVar()->getType()));
1236 as[addr->getLHSVarID()] = as[addr->getRHSVarID()];
1237}
1238
1239
1241{
1245 const ICFGNode* node = binary->getICFGNode();
1247 u32_t op0 = binary->getOpVarID(0);
1248 u32_t op1 = binary->getOpVarID(1);
1249 u32_t res = binary->getResID();
1250 if (!as.inVarToValTable(op0)) as[op0] = IntervalValue::top();
1251 if (!as.inVarToValTable(op1)) as[op1] = IntervalValue::top();
1252 IntervalValue &lhs = as[op0].getInterval(), &rhs = as[op1].getInterval();
1254 switch (binary->getOpcode())
1255 {
1256 case BinaryOPStmt::Add:
1257 case BinaryOPStmt::FAdd:
1258 resVal = (lhs + rhs);
1259 break;
1260 case BinaryOPStmt::Sub:
1261 case BinaryOPStmt::FSub:
1262 resVal = (lhs - rhs);
1263 break;
1264 case BinaryOPStmt::Mul:
1265 case BinaryOPStmt::FMul:
1266 resVal = (lhs * rhs);
1267 break;
1268 case BinaryOPStmt::SDiv:
1269 case BinaryOPStmt::FDiv:
1270 case BinaryOPStmt::UDiv:
1271 resVal = (lhs / rhs);
1272 break;
1273 case BinaryOPStmt::SRem:
1274 case BinaryOPStmt::FRem:
1275 case BinaryOPStmt::URem:
1276 resVal = (lhs % rhs);
1277 break;
1278 case BinaryOPStmt::Xor:
1279 resVal = (lhs ^ rhs);
1280 break;
1281 case BinaryOPStmt::And:
1282 resVal = (lhs & rhs);
1283 break;
1284 case BinaryOPStmt::Or:
1285 resVal = (lhs | rhs);
1286 break;
1287 case BinaryOPStmt::AShr:
1288 resVal = (lhs >> rhs);
1289 break;
1290 case BinaryOPStmt::Shl:
1291 resVal = (lhs << rhs);
1292 break;
1293 case BinaryOPStmt::LShr:
1294 resVal = (lhs >> rhs);
1295 break;
1296 default:
1297 assert(false && "undefined binary: ");
1298 }
1299 as[res] = resVal;
1300}
1301
1303{
1304 AbstractState& as = getAbsStateFromTrace(cmp->getICFGNode());
1305 u32_t op0 = cmp->getOpVarID(0);
1306 u32_t op1 = cmp->getOpVarID(1);
1307 // if it is address
1308 if (as.inVarToAddrsTable(op0) && as.inVarToAddrsTable(op1))
1309 {
1311 AddressValue addrOp0 = as[op0].getAddrs();
1312 AddressValue addrOp1 = as[op1].getAddrs();
1313 u32_t res = cmp->getResID();
1314 if (addrOp0.equals(addrOp1))
1315 {
1316 resVal = IntervalValue(1, 1);
1317 }
1318 else if (addrOp0.hasIntersect(addrOp1))
1319 {
1320 resVal = IntervalValue(0, 1);
1321 }
1322 else
1323 {
1324 resVal = IntervalValue(0, 0);
1325 }
1326 as[res] = resVal;
1327 }
1328 // if op0 or op1 is nullptr, compare abstractValue instead of touching addr or interval
1329 else if (op0 == IRGraph::NullPtr || op1 == IRGraph::NullPtr)
1330 {
1331 u32_t res = cmp->getResID();
1332 IntervalValue resVal = (as[op0].equals(as[op1])) ? IntervalValue(1, 1) : IntervalValue(0, 0);
1333 as[res] = resVal;
1334 }
1335 else
1336 {
1337 if (!as.inVarToValTable(op0))
1339 if (!as.inVarToValTable(op1))
1341 u32_t res = cmp->getResID();
1342 if (as.inVarToValTable(op0) && as.inVarToValTable(op1))
1343 {
1345 if (as[op0].isInterval() && as[op1].isInterval())
1346 {
1347 IntervalValue &lhs = as[op0].getInterval(),
1348 &rhs = as[op1].getInterval();
1349 // AbstractValue
1350 auto predicate = cmp->getPredicate();
1351 switch (predicate)
1352 {
1353 case CmpStmt::ICMP_EQ:
1354 case CmpStmt::FCMP_OEQ:
1355 case CmpStmt::FCMP_UEQ:
1356 resVal = (lhs == rhs);
1357 // resVal = (lhs.getInterval() == rhs.getInterval());
1358 break;
1359 case CmpStmt::ICMP_NE:
1360 case CmpStmt::FCMP_ONE:
1361 case CmpStmt::FCMP_UNE:
1362 resVal = (lhs != rhs);
1363 break;
1364 case CmpStmt::ICMP_UGT:
1365 case CmpStmt::ICMP_SGT:
1366 case CmpStmt::FCMP_OGT:
1367 case CmpStmt::FCMP_UGT:
1368 resVal = (lhs > rhs);
1369 break;
1370 case CmpStmt::ICMP_UGE:
1371 case CmpStmt::ICMP_SGE:
1372 case CmpStmt::FCMP_OGE:
1373 case CmpStmt::FCMP_UGE:
1374 resVal = (lhs >= rhs);
1375 break;
1376 case CmpStmt::ICMP_ULT:
1377 case CmpStmt::ICMP_SLT:
1378 case CmpStmt::FCMP_OLT:
1379 case CmpStmt::FCMP_ULT:
1380 resVal = (lhs < rhs);
1381 break;
1382 case CmpStmt::ICMP_ULE:
1383 case CmpStmt::ICMP_SLE:
1384 case CmpStmt::FCMP_OLE:
1385 case CmpStmt::FCMP_ULE:
1386 resVal = (lhs <= rhs);
1387 break;
1389 resVal = IntervalValue(0, 0);
1390 break;
1391 case CmpStmt::FCMP_TRUE:
1392 resVal = IntervalValue(1, 1);
1393 break;
1394 default:
1395 assert(false && "undefined compare: ");
1396 }
1397 as[res] = resVal;
1398 }
1399 else if (as[op0].isAddr() && as[op1].isAddr())
1400 {
1401 AddressValue &lhs = as[op0].getAddrs(),
1402 &rhs = as[op1].getAddrs();
1403 auto predicate = cmp->getPredicate();
1404 switch (predicate)
1405 {
1406 case CmpStmt::ICMP_EQ:
1407 case CmpStmt::FCMP_OEQ:
1408 case CmpStmt::FCMP_UEQ:
1409 {
1410 if (lhs.hasIntersect(rhs))
1411 {
1412 resVal = IntervalValue(0, 1);
1413 }
1414 else if (lhs.empty() && rhs.empty())
1415 {
1416 resVal = IntervalValue(1, 1);
1417 }
1418 else
1419 {
1420 resVal = IntervalValue(0, 0);
1421 }
1422 break;
1423 }
1424 case CmpStmt::ICMP_NE:
1425 case CmpStmt::FCMP_ONE:
1426 case CmpStmt::FCMP_UNE:
1427 {
1428 if (lhs.hasIntersect(rhs))
1429 {
1430 resVal = IntervalValue(0, 1);
1431 }
1432 else if (lhs.empty() && rhs.empty())
1433 {
1434 resVal = IntervalValue(0, 0);
1435 }
1436 else
1437 {
1438 resVal = IntervalValue(1, 1);
1439 }
1440 break;
1441 }
1442 case CmpStmt::ICMP_UGT:
1443 case CmpStmt::ICMP_SGT:
1444 case CmpStmt::FCMP_OGT:
1445 case CmpStmt::FCMP_UGT:
1446 {
1447 if (lhs.size() == 1 && rhs.size() == 1)
1448 {
1449 resVal = IntervalValue(*lhs.begin() > *rhs.begin());
1450 }
1451 else
1452 {
1453 resVal = IntervalValue(0, 1);
1454 }
1455 break;
1456 }
1457 case CmpStmt::ICMP_UGE:
1458 case CmpStmt::ICMP_SGE:
1459 case CmpStmt::FCMP_OGE:
1460 case CmpStmt::FCMP_UGE:
1461 {
1462 if (lhs.size() == 1 && rhs.size() == 1)
1463 {
1464 resVal = IntervalValue(*lhs.begin() >= *rhs.begin());
1465 }
1466 else
1467 {
1468 resVal = IntervalValue(0, 1);
1469 }
1470 break;
1471 }
1472 case CmpStmt::ICMP_ULT:
1473 case CmpStmt::ICMP_SLT:
1474 case CmpStmt::FCMP_OLT:
1475 case CmpStmt::FCMP_ULT:
1476 {
1477 if (lhs.size() == 1 && rhs.size() == 1)
1478 {
1479 resVal = IntervalValue(*lhs.begin() < *rhs.begin());
1480 }
1481 else
1482 {
1483 resVal = IntervalValue(0, 1);
1484 }
1485 break;
1486 }
1487 case CmpStmt::ICMP_ULE:
1488 case CmpStmt::ICMP_SLE:
1489 case CmpStmt::FCMP_OLE:
1490 case CmpStmt::FCMP_ULE:
1491 {
1492 if (lhs.size() == 1 && rhs.size() == 1)
1493 {
1494 resVal = IntervalValue(*lhs.begin() <= *rhs.begin());
1495 }
1496 else
1497 {
1498 resVal = IntervalValue(0, 1);
1499 }
1500 break;
1501 }
1503 resVal = IntervalValue(0, 0);
1504 break;
1505 case CmpStmt::FCMP_TRUE:
1506 resVal = IntervalValue(1, 1);
1507 break;
1508 default:
1509 assert(false && "undefined compare: ");
1510 }
1511 as[res] = resVal;
1512 }
1513 }
1514 }
1515}
1516
1518{
1520 u32_t rhs = load->getRHSVarID();
1521 u32_t lhs = load->getLHSVarID();
1522 as[lhs] = as.loadValue(rhs);
1523}
1524
1526{
1528 u32_t rhs = store->getRHSVarID();
1529 u32_t lhs = store->getLHSVarID();
1530 as.storeValue(lhs, as[rhs]);
1531}
1532
1534{
1535 auto getZExtValue = [](AbstractState& as, const SVFVar* var)
1536 {
1537 const SVFType* type = var->getType();
1538 if (SVFUtil::isa<SVFIntegerType>(type))
1539 {
1540 u32_t bits = type->getByteSize() * 8;
1541 if (as[var->getId()].getInterval().is_numeral())
1542 {
1543 if (bits == 8)
1544 {
1545 int8_t signed_i8_value = as[var->getId()].getInterval().getIntNumeral();
1548 }
1549 else if (bits == 16)
1550 {
1551 s16_t signed_i16_value = as[var->getId()].getInterval().getIntNumeral();
1554 }
1555 else if (bits == 32)
1556 {
1557 s32_t signed_i32_value = as[var->getId()].getInterval().getIntNumeral();
1560 }
1561 else if (bits == 64)
1562 {
1563 s64_t signed_i64_value = as[var->getId()].getInterval().getIntNumeral();
1565 // we only support i64 at most
1566 }
1567 else
1568 assert(false && "cannot support int type other than u8/16/32/64");
1569 }
1570 else
1571 {
1572 return IntervalValue::top(); // TODO: may have better solution
1573 }
1574 }
1575 return IntervalValue::top(); // TODO: may have better solution
1576 };
1577
1578 auto getTruncValue = [&](const AbstractState& as, const SVFVar* var,
1579 const SVFType* dstType)
1580 {
1581 const IntervalValue& itv = as[var->getId()].getInterval();
1582 if(itv.isBottom()) return itv;
1583 // get the value of ub and lb
1585 s64_t int_ub = itv.ub().getIntNumeral();
1586 // get dst type
1587 u32_t dst_bits = dstType->getByteSize() * 8;
1588 if (dst_bits == 8)
1589 {
1590 // get the signed value of ub and lb
1591 int8_t s8_lb = static_cast<int8_t>(int_lb);
1592 int8_t s8_ub = static_cast<int8_t>(int_ub);
1593 if (s8_lb > s8_ub)
1594 {
1595 // return range of s8
1597 }
1598 return IntervalValue(s8_lb, s8_ub);
1599 }
1600 else if (dst_bits == 16)
1601 {
1602 // get the signed value of ub and lb
1603 s16_t s16_lb = static_cast<s16_t>(int_lb);
1604 s16_t s16_ub = static_cast<s16_t>(int_ub);
1605 if (s16_lb > s16_ub)
1606 {
1607 // return range of s16
1609 }
1610 return IntervalValue(s16_lb, s16_ub);
1611 }
1612 else if (dst_bits == 32)
1613 {
1614 // get the signed value of ub and lb
1615 s32_t s32_lb = static_cast<s32_t>(int_lb);
1616 s32_t s32_ub = static_cast<s32_t>(int_ub);
1617 if (s32_lb > s32_ub)
1618 {
1619 // return range of s32
1621 }
1622 return IntervalValue(s32_lb, s32_ub);
1623 }
1624 else
1625 assert(false && "cannot support dst int type other than u8/16/32");
1626 };
1627
1628 AbstractState& as = getAbsStateFromTrace(copy->getICFGNode());
1629 u32_t lhs = copy->getLHSVarID();
1630 u32_t rhs = copy->getRHSVarID();
1631
1632 if (copy->getCopyKind() == CopyStmt::COPYVAL)
1633 {
1634 as[lhs] = as[rhs];
1635 }
1636 else if (copy->getCopyKind() == CopyStmt::ZEXT)
1637 {
1638 as[lhs] = getZExtValue(as, copy->getRHSVar());
1639 }
1640 else if (copy->getCopyKind() == CopyStmt::SEXT)
1641 {
1642 as[lhs] = as[rhs].getInterval();
1643 }
1644 else if (copy->getCopyKind() == CopyStmt::FPTOSI)
1645 {
1646 as[lhs] = as[rhs].getInterval();
1647 }
1648 else if (copy->getCopyKind() == CopyStmt::FPTOUI)
1649 {
1650 as[lhs] = as[rhs].getInterval();
1651 }
1652 else if (copy->getCopyKind() == CopyStmt::SITOFP)
1653 {
1654 as[lhs] = as[rhs].getInterval();
1655 }
1656 else if (copy->getCopyKind() == CopyStmt::UITOFP)
1657 {
1658 as[lhs] = as[rhs].getInterval();
1659 }
1660 else if (copy->getCopyKind() == CopyStmt::TRUNC)
1661 {
1662 as[lhs] = getTruncValue(as, copy->getRHSVar(), copy->getLHSVar()->getType());
1663 }
1664 else if (copy->getCopyKind() == CopyStmt::FPTRUNC)
1665 {
1666 as[lhs] = as[rhs].getInterval();
1667 }
1668 else if (copy->getCopyKind() == CopyStmt::INTTOPTR)
1669 {
1670 //insert nullptr
1671 }
1672 else if (copy->getCopyKind() == CopyStmt::PTRTOINT)
1673 {
1675 }
1676 else if (copy->getCopyKind() == CopyStmt::BITCAST)
1677 {
1678 if (as[rhs].isAddr())
1679 {
1680 as[lhs] = as[rhs];
1681 }
1682 else
1683 {
1684 // do nothing
1685 }
1686 }
1687 else
1688 assert(false && "undefined copy kind");
1689}
1690
1691
1692
#define TIMEINTERVAL
Definition SVFType.h:526
newitem type
Definition cJSON.cpp:2739
copy
Definition cJSON.cpp:414
const char *const name
Definition cJSON.h:264
void performStat() override
std::string getMemUsage()
AbstractInterpretation * _ae
Handles external API calls and manages abstract states.
Definition AbsExtAPI.h:44
void handleExtAPI(const CallICFGNode *call)
Handles an external API call.
IntervalValue getRangeLimitFromType(const SVFType *type)
Gets the range limit from a type.
void updateStateOnCall(const CallPE *callPE)
virtual bool isRecursiveCall(const CallICFGNode *callNode)
virtual void recursiveCallPass(const CallICFGNode *callNode)
void updateStateOnStore(const StoreStmt *store)
virtual bool isDirectCall(const CallICFGNode *callNode)
bool hasAbsStateFromTrace(const ICFGNode *node)
virtual void handleCycleWTO(const ICFGCycleWTO *cycle)
handle wto cycle (loop)
Set< const FunObjVar * > recursiveFuns
void updateStateOnGep(const GepStmt *gep)
virtual void extCallPass(const CallICFGNode *callNode)
virtual void handleGlobalNode()
Global ICFGNode is handled at the entry of the program,.
bool mergeStatesFromPredecessors(const ICFGNode *icfgNode)
virtual void directCallFunPass(const CallICFGNode *callNode)
void handleWTOComponent(const ICFGWTOComp *wtoComp)
virtual bool isExtCall(const CallICFGNode *callNode)
virtual bool isIndirectCall(const CallICFGNode *callNode)
void initWTO()
Compute IWTO for each function partition entry.
void updateStateOnPhi(const PhiStmt *phi)
bool isBranchFeasible(const IntraCFGEdge *intraEdge, AbstractState &as)
std::vector< std::unique_ptr< AEDetector > > detectors
AbstractState & getAbsStateFromTrace(const ICFGNode *node)
Retrieves the abstract state from the trace for a given ICFG node.
Set< const CallICFGNode * > checkpoints
bool isSwitchBranchFeasible(const SVFVar *var, s64_t succ, AbstractState &as)
void updateStateOnSelect(const SelectStmt *select)
virtual void handleSVFStatement(const SVFStmt *stmt)
virtual void indirectCallFunPass(const CallICFGNode *callNode)
SVFIR * svfir
protected data members, also used in subclasses
virtual void runOnModule(ICFG *icfg)
virtual bool isRecursiveCallSite(const CallICFGNode *callNode, const FunObjVar *)
virtual bool isRecursiveFun(const FunObjVar *fun)
void updateStateOnAddr(const AddrStmt *addr)
virtual ~AbstractInterpretation()
Destructor.
bool isCmpBranchFeasible(const CmpStmt *cmpStmt, s64_t succ, AbstractState &as)
virtual void handleCallSite(const ICFGNode *node)
void updateStateOnRet(const RetPE *retPE)
void handleWTOComponents(const std::list< const ICFGWTOComp * > &wtoComps)
Handle two types of WTO components (singleton and cycle)
void updateStateOnCopy(const CopyStmt *copy)
Set< std::pair< const CallICFGNode *, NodeID > > nonRecursiveCallSites
void updateStateOnLoad(const LoadStmt *load)
void updateStateOnBinary(const BinaryOPStmt *binary)
Map< const ICFGNode *, AbstractState > abstractTrace
std::vector< const CallICFGNode * > callSiteStack
virtual void handleSingletonWTO(const ICFGSingletonWTO *icfgSingletonWto)
handle instructions in svf basic blocks
Map< s32_t, s32_t > _switch_lhsrhs_predicate
void updateStateOnCmp(const CmpStmt *cmp)
virtual void SkipRecursiveCall(const CallICFGNode *callnode)
Map< const FunObjVar *, const ICFGWTO * > funcToWTO
AbstractState widening(const AbstractState &other)
domain widen with other, and return the widened domain
AddressValue & getAddrs()
bool meet_with(const AddressValue &other)
Return an intersected AddressValue.
AddrSet::const_iterator begin() const
static AndersenWaveDiff * createAndersenWaveDiff(SVFIR *_pag)
Create a singleton instance directly instead of invoking llvm pass manager.
Definition Andersen.h:407
NodeID getRHSVarID() const
NodeID getLHSVarID() const
s64_t getIntNumeral() const
const CallGraphNode * getCallGraphNode(const std::string &name)
Get call graph node.
@ ICMP_SGT
signed greater than
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ ICMP_UGE
unsigned greater or equal
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ ICMP_ULE
unsigned less or equal
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ ICMP_NE
not equal
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
@ ICMP_ULT
unsigned less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ ICMP_SLT
signed less than
@ ICMP_UGT
unsigned greater than
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_SLE
signed less or equal
virtual const FunObjVar * getFunction() const
Get containing function, or null for globals/constants.
bool isDeclaration() const
iterator begin()
Iterators.
u32_t nodeNum
total num of edge
NodeType * getGNode(NodeID id) const
Get a node.
const GEdgeSetTy & getInEdges() const
virtual const std::string toString() const
Definition ICFG.cpp:60
const SVFStmtList & getSVFStmts() const
Definition ICFGNode.h:117
ICFGEdge * getICFGEdge(const ICFGNode *src, const ICFGNode *dst, ICFGEdge::ICFGEdgeK kind)
Get a SVFG edge according to src and dst.
Definition ICFG.cpp:311
const std::vector< const ICFGNode * > & getSubNodes(const ICFGNode *node) const
Definition ICFG.h:241
FunEntryICFGNode * getFunEntryICFGNode(const FunObjVar *fun)
Add a function entry node.
Definition ICFG.cpp:242
GlobalICFGNode * getGlobalICFGNode() const
Definition ICFG.h:236
NodeID getBlkPtr() const
Definition IRGraph.h:255
void meet_with(const IntervalValue &other)
Return an intersected IntervalValue.
static BoundedInt minus_infinity()
Get minus infinity -inf.
static BoundedInt plus_infinity()
Get plus infinity +inf.
static IntervalValue top()
Create the IntervalValue [-inf, +inf].
const BoundedInt & lb() const
Return the lower bound.
static const Option< u32_t > MaxFieldLimit
Maximum number of field derivations for an object.
Definition Options.h:39
static const Option< u32_t > WidenDelay
Definition Options.h:247
static const Option< bool > NullDerefCheck
nullptr dereference checker, Default: false
Definition Options.h:257
static const Option< bool > PStat
Definition Options.h:120
static const OptionMap< AbstractInterpretation::HandleRecur > HandleRecur
recursion handling mode, Default: TOP
Definition Options.h:249
static const Option< bool > BufferOverflowCheck
buffer overflow checker, Default: false
Definition Options.h:255
CallGraph * getCallGraph() const
Return call graph.
SCCDetection< CallGraph * > CallGraphSCC
CallGraphSCC * getCallGraphSCC() const
Return call graph SCC.
CallGraph * getCallGraph()
Definition SVFIR.h:184
ICFG * getICFG() const
Definition SVFIR.h:163
const CallSiteToFunPtrMap & getIndirectCallsites() const
Add/get indirect callsites.
Definition SVFIR.h:378
static SVFIR * getPAG(bool buildFromFile=false)
Singleton design here to make sure we only have one instance during any analysis.
Definition SVFIR.h:116
NUMStatMap generalNumMap
Definition SVFStat.h:76
virtual void endClk()
Definition SVFStat.h:63
std::string moduleName
Definition SVFStat.h:99
TIMEStatMap timeStatMap
Definition SVFStat.h:78
virtual void startClk()
Definition SVFStat.h:58
double endTime
Definition SVFStat.h:81
double startTime
Definition SVFStat.h:80
ICFGNode * getICFGNode() const
u32_t getByteSize() const
Definition SVFType.h:244
std::string errMsg(const std::string &msg)
Print error message by converting a string into red string output.
Definition SVFUtil.cpp:78
std::ostream & errs()
Overwrite llvm::errs()
Definition SVFUtil.h:58
bool isExtCall(const FunObjVar *fun)
Definition SVFUtil.cpp:437
std::ostream & outs()
Overwrite llvm::outs()
Definition SVFUtil.h:52
for isBitcode
Definition BasicTypes.h:68
u32_t NodeID
Definition GeneralType.h:56
s64_t APOffset
Definition GeneralType.h:60
signed short s16_t
Definition GeneralType.h:54
unsigned short u16_t
Definition GeneralType.h:53
llvm::IRBuilder IRBuilder
Definition BasicTypes.h:74
signed s32_t
Definition GeneralType.h:48
unsigned u32_t
Definition GeneralType.h:47
signed long long s64_t
Definition GeneralType.h:50