Static Value-Flow Analysis
AbstractInterpretation.cpp
1//===- AbstractInterpretation.cpp -- Abstract Interpretation -----------------------//
2//
3// SVF: Static Value-Flow Analysis
4//
5// Copyright (C) <2013-> <Yulei Sui>
6//
7
8// This program is free software: you can redistribute it and/or modify
9// it under the terms of the GNU Affero General Public License as published by
10// the Free Software Foundation, either version 3 of the License, or
11// (at your option) any later version.
12
13// This program is distributed in the hope that it will be useful,
14// but WITHOUT ANY WARRANTY; without even the implied warranty of
15// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16// GNU Affero General Public License for more details.
17
18// You should have received a copy of the GNU Affero General Public License
19// along with this program. If not, see <http://www.gnu.org/licenses/>.
20//
21//===----------------------------------------------------------------------===//
22
23
24//
25// Created on: Jan 10, 2024
26// Author: Xiao Cheng, Jiawei Wang
27//
28
29#include "AE/Svfexe/AbstractInterpretation.h"
30#include "AE/Svfexe/AbsExtAPI.h"
31#include "SVFIR/SVFIR.h"
32#include "Util/Options.h"
33#include "Util/WorkList.h"
34#include "Graphs/CallGraph.h"
35#include "WPA/Andersen.h"
36#include <cmath>
37
38using namespace SVF;
39using namespace SVFUtil;
40using namespace z3;
41
42
43void AbstractInterpretation::runOnModule(ICFG* _icfg)
44{
45 stat->startClk();
46 icfg = _icfg;
49
52
53 analyse();
55 stat->endClk();
57 if (Options::PStat())
58 stat->performStat();
59 for (auto& detector: detectors)
60 detector->reportBug();
61}
62
68AbstractInterpretation::~AbstractInterpretation()
69{
70 delete stat;
71 for (auto it: funcToWTO)
72 delete it.second;
73}
74
84void AbstractInterpretation::initWTO()
85{
86 auto* ander = AndersenWaveDiff::createAndersenWaveDiff(svfir);
87 // Detect if the call graph has cycles by finding its strongly connected components (SCC)
88 auto* callGraphScc = ander->getCallGraphSCC();
89 callGraphScc->find();
90 CallGraph* callGraph = ander->getCallGraph();
91
92 // Iterate through the call graph
93 for (auto it = callGraph->begin(); it != callGraph->end(); it++)
94 {
95 // Check if the current function is part of a cycle
96 if (callGraphScc->isInCycle(it->second->getId()))
97 recursiveFuns.insert(it->second->getFunction()); // Mark the function as recursive
98
99 // Calculate ICFGWTO for each function/recursion
100 const FunObjVar *fun = it->second->getFunction();
101 if (fun->isDeclaration())
102 continue;
103
104 NodeID repNodeId = callGraphScc->repNode(it->second->getId());
105 auto cgSCCNodes = callGraphScc->subNodes(repNodeId);
106
107 // Identify if this node is an SCC entry (nodes who have incoming edges
108 // from nodes outside the SCC). Also identify non-recursive callsites.
109 bool isEntry = false;
110 if (it->second->getInEdges().empty())
111 isEntry = true;
112 for (auto inEdge: it->second->getInEdges())
113 {
114 NodeID srcNodeId = inEdge->getSrcID();
115 if (!cgSCCNodes.test(srcNodeId))
116 {
117 isEntry = true;
118 const CallICFGNode *callSite = nullptr;
119 if (inEdge->isDirectCallEdge())
120 callSite = *(inEdge->getDirectCalls().begin());
121 else if (inEdge->isIndirectCallEdge())
122 callSite = *(inEdge->getIndirectCalls().begin());
123 else
124 assert(false && "CallGraphEdge must "
125 "be either direct or indirect!");
126
127 nonRecursiveCallSites.insert(
128 {callSite, inEdge->getDstNode()->getFunction()->getId()});
129 }
130 }
131
132 // Compute IWTO for the function partition entered from each partition entry
133 if (isEntry)
134 {
135 Set<const FunObjVar*> funcScc;
136 for (const auto& node: cgSCCNodes)
137 {
138 funcScc.insert(callGraph->getGNode(node)->getFunction());
139 }
141 iwto->init();
142 funcToWTO[it->second->getFunction()] = iwto;
143 }
144 }
145}
146
148void AbstractInterpretation::analyse()
149{
150 initWTO();
151 // handle Global ICFGNode of SVFModule
152 handleGlobalNode();
155 if (const CallGraphNode* cgn = svfir->getCallGraph()->getCallGraphNode("main"))
156 {
157 const ICFGWTO* wto = funcToWTO[cgn->getFunction()];
158 handleWTOComponents(wto->getWTOComponents());
159 }
160}
161
163void AbstractInterpretation::handleGlobalNode()
164{
165 const ICFGNode* node = icfg->getGlobalICFGNode();
168 // Global Node, we just need to handle addr, load, store, copy and gep
169 for (const SVFStmt *stmt: node->getSVFStmts())
170 {
171 handleSVFStatement(stmt);
172 }
173}
174
178bool AbstractInterpretation::mergeStatesFromPredecessors(const ICFGNode* icfgNode)
179{
180 std::vector<AbstractState> workList;
181 AbstractState preAs;
182 for (auto& edge: icfgNode->getInEdges())
183 {
184 if (abstractTrace.find(edge->getSrcNode()) != abstractTrace.end())
185 {
186
187 if (const IntraCFGEdge *intraCfgEdge =
188 SVFUtil::dyn_cast<IntraCFGEdge>(edge))
189 {
190 AbstractState tmpEs = abstractTrace[edge->getSrcNode()];
191 if (intraCfgEdge->getCondition())
192 {
193 if (isBranchFeasible(intraCfgEdge, tmpEs))
194 {
195 workList.push_back(tmpEs);
196 }
197 else
198 {
199 // do nothing
200 }
201 }
202 else
203 {
204 workList.push_back(tmpEs);
205 }
206 }
207 else if (const CallCFGEdge *callCfgEdge =
208 SVFUtil::dyn_cast<CallCFGEdge>(edge))
209 {
210
211 // context insensitive implementation
212 workList.push_back(
213 abstractTrace[callCfgEdge->getSrcNode()]);
214 }
215 else if (const RetCFGEdge *retCfgEdge =
216 SVFUtil::dyn_cast<RetCFGEdge>(edge))
217 {
218 switch (Options::HandleRecur())
219 {
220 case TOP:
221 {
222 workList.push_back(abstractTrace[retCfgEdge->getSrcNode()]);
223 break;
224 }
225 case WIDEN_ONLY:
226 case WIDEN_NARROW:
227 {
228 const RetICFGNode* returnSite = SVFUtil::dyn_cast<RetICFGNode>(icfgNode);
229 const CallICFGNode* callSite = returnSite->getCallICFGNode();
231 workList.push_back(abstractTrace[retCfgEdge->getSrcNode()]);
232 }
233 }
234 }
235 else
236 assert(false && "Unhandled ICFGEdge type encountered!");
237 }
238 }
239 if (workList.size() == 0)
240 {
241 return false;
242 }
243 else
244 {
245 while (!workList.empty())
246 {
247 preAs.joinWith(workList.back());
248 workList.pop_back();
249 }
250 // Has ES on the in edges - Feasible block
251 // update post as
252 abstractTrace[icfgNode] = preAs;
253 return true;
254 }
255}
256
257
258bool AbstractInterpretation::isCmpBranchFeasible(const CmpStmt* cmpStmt, s64_t succ,
259 AbstractState& as)
260{
261 AbstractState new_es = as;
262 // get cmp stmt's op0, op1, and predicate
263 NodeID op0 = cmpStmt->getOpVarID(0);
264 NodeID op1 = cmpStmt->getOpVarID(1);
265 NodeID res_id = cmpStmt->getResID();
266 s32_t predicate = cmpStmt->getPredicate();
267 // if op0 or op1 is nullptr, no need to change value, just copy the state
268 if (op0 == IRGraph::NullPtr || op1 == IRGraph::NullPtr)
269 {
270 as = new_es;
271 return true;
272 }
273 // if op0 or op1 is undefined, return;
274 // skip address compare
275 if (new_es.inVarToAddrsTable(op0) || new_es.inVarToAddrsTable(op1))
276 {
277 as = new_es;
278 return true;
279 }
280 const LoadStmt *load_op0 = nullptr;
281 const LoadStmt *load_op1 = nullptr;
282 // get '%1 = load i32 s', and load inst may not exist
283 SVFVar* loadVar0 = svfir->getGNode(op0);
284 if (!loadVar0->getInEdges().empty())
285 {
286 SVFStmt *loadVar0InStmt = *loadVar0->getInEdges().begin();
287 if (const LoadStmt *loadStmt = SVFUtil::dyn_cast<LoadStmt>(loadVar0InStmt))
288 {
289 load_op0 = loadStmt;
290 }
291 else if (const CopyStmt *copyStmt = SVFUtil::dyn_cast<CopyStmt>(loadVar0InStmt))
292 {
293 loadVar0 = svfir->getGNode(copyStmt->getRHSVarID());
294 if (!loadVar0->getInEdges().empty())
295 {
296 SVFStmt *loadVar0InStmt2 = *loadVar0->getInEdges().begin();
297 if (const LoadStmt *loadStmt = SVFUtil::dyn_cast<LoadStmt>(loadVar0InStmt2))
298 {
299 load_op0 = loadStmt;
300 }
301 }
302 }
303 }
304
305 SVFVar* loadVar1 = svfir->getGNode(op1);
306 if (!loadVar1->getInEdges().empty())
307 {
308 SVFStmt *loadVar1InStmt = *loadVar1->getInEdges().begin();
309 if (const LoadStmt *loadStmt = SVFUtil::dyn_cast<LoadStmt>(loadVar1InStmt))
310 {
311 load_op1 = loadStmt;
312 }
313 else if (const CopyStmt *copyStmt = SVFUtil::dyn_cast<CopyStmt>(loadVar1InStmt))
314 {
315 loadVar1 = svfir->getGNode(copyStmt->getRHSVarID());
316 if (!loadVar1->getInEdges().empty())
317 {
318 SVFStmt *loadVar1InStmt2 = *loadVar1->getInEdges().begin();
319 if (const LoadStmt *loadStmt = SVFUtil::dyn_cast<LoadStmt>(loadVar1InStmt2))
320 {
321 load_op1 = loadStmt;
322 }
323 }
324 }
325 }
326 // for const X const, we may get concrete resVal instantly
327 // for var X const, we may get [0,1] if the intersection of var and const is not empty set
328 IntervalValue resVal = new_es[res_id].getInterval();
329 resVal.meet_with(IntervalValue((s64_t)succ, succ));
330 // If Var X const generates bottom value, it means this branch path is not feasible.
331 if (resVal.isBottom())
332 {
333 return false;
334 }
335
336 bool b0 = new_es[op0].getInterval().is_numeral();
337 bool b1 = new_es[op1].getInterval().is_numeral();
338
339 // if const X var, we should reverse op0 and op1.
340 if (b0 && !b1)
341 {
342 std::swap(op0, op1);
343 std::swap(load_op0, load_op1);
344 predicate = _switch_lhsrhs_predicate[predicate];
345 }
346 else
347 {
348 // if var X var, we cannot preset the branch condition to infer the intervals of var0,var1
349 if (!b0 && !b1)
350 {
351 as = new_es;
352 return true;
353 }
354 // if const X const, we can instantly get the resVal
355 else if (b0 && b1)
356 {
357 as = new_es;
358 return true;
359 }
360 }
361 // if cmp is 'var X const == false', we should reverse predicate 'var X' const == true'
362 // X' is reverse predicate of X
363 if (succ == 0)
364 {
365 predicate = _reverse_predicate[predicate];
366 }
367 else {}
368 // change interval range according to the compare predicate
369 AddressValue addrs;
370 if(load_op0 && new_es.inVarToAddrsTable(load_op0->getRHSVarID()))
371 addrs = new_es[load_op0->getRHSVarID()].getAddrs();
372
373 IntervalValue &lhs = new_es[op0].getInterval(), &rhs = new_es[op1].getInterval();
374 switch (predicate)
375 {
376 case CmpStmt::ICMP_EQ:
377 case CmpStmt::FCMP_OEQ:
378 case CmpStmt::FCMP_UEQ:
379 {
380 // Var == Const, so [var.lb, var.ub].meet_with(const)
381 lhs.meet_with(rhs);
382 // if lhs is register value, we should also change its mem obj
383 for (const auto &addr: addrs)
384 {
385 NodeID objId = new_es.getIDFromAddr(addr);
386 if (new_es.inAddrToValTable(objId))
387 {
388 new_es.load(addr).meet_with(rhs);
389 }
390 }
391 break;
392 }
393 case CmpStmt::ICMP_NE:
394 case CmpStmt::FCMP_ONE:
395 case CmpStmt::FCMP_UNE:
396 // Complement set
397 break;
398 case CmpStmt::ICMP_UGT:
399 case CmpStmt::ICMP_SGT:
400 case CmpStmt::FCMP_OGT:
401 case CmpStmt::FCMP_UGT:
402 // Var > Const, so [var.lb, var.ub].meet_with([Const+1, +INF])
403 lhs.meet_with(IntervalValue(rhs.lb() + 1, IntervalValue::plus_infinity()));
404 // if lhs is register value, we should also change its mem obj
405 for (const auto &addr: addrs)
406 {
407 NodeID objId = new_es.getIDFromAddr(addr);
408 if (new_es.inAddrToValTable(objId))
409 {
410 new_es.load(addr).meet_with(
411 IntervalValue(rhs.lb() + 1, IntervalValue::plus_infinity()));
412 }
413 }
414 break;
415 case CmpStmt::ICMP_UGE:
416 case CmpStmt::ICMP_SGE:
417 case CmpStmt::FCMP_OGE:
418 case CmpStmt::FCMP_UGE:
419 {
420 // Var >= Const, so [var.lb, var.ub].meet_with([Const, +INF])
421 lhs.meet_with(IntervalValue(rhs.lb(), IntervalValue::plus_infinity()));
422 // if lhs is register value, we should also change its mem obj
423 for (const auto &addr: addrs)
424 {
425 NodeID objId = new_es.getIDFromAddr(addr);
426 if (new_es.inAddrToValTable(objId))
427 {
428 new_es.load(addr).meet_with(
429 IntervalValue(rhs.lb(), IntervalValue::plus_infinity()));
430 }
431 }
432 break;
433 }
434 case CmpStmt::ICMP_ULT:
435 case CmpStmt::ICMP_SLT:
436 case CmpStmt::FCMP_OLT:
437 case CmpStmt::FCMP_ULT:
438 {
439 // Var < Const, so [var.lb, var.ub].meet_with([-INF, const.ub-1])
440 lhs.meet_with(IntervalValue(IntervalValue::minus_infinity(), rhs.ub() - 1));
441 // if lhs is register value, we should also change its mem obj
442 for (const auto &addr: addrs)
443 {
444 NodeID objId = new_es.getIDFromAddr(addr);
445 if (new_es.inAddrToValTable(objId))
446 {
447 new_es.load(addr).meet_with(
448 IntervalValue(IntervalValue::minus_infinity(), rhs.ub() - 1));
449 }
450 }
451 break;
452 }
453 case CmpStmt::ICMP_ULE:
454 case CmpStmt::ICMP_SLE:
455 case CmpStmt::FCMP_OLE:
456 case CmpStmt::FCMP_ULE:
457 {
458 // Var <= Const, so [var.lb, var.ub].meet_with([-INF, const.ub])
459 lhs.meet_with(IntervalValue(IntervalValue::minus_infinity(), rhs.ub()));
460 // if lhs is register value, we should also change its mem obj
461 for (const auto &addr: addrs)
462 {
463 NodeID objId = new_es.getIDFromAddr(addr);
464 if (new_es.inAddrToValTable(objId))
465 {
466 new_es.load(addr).meet_with(
467 IntervalValue(IntervalValue::minus_infinity(), rhs.ub()));
468 }
469 }
470 break;
471 }
472 case CmpStmt::FCMP_FALSE:
473 break;
474 case CmpStmt::FCMP_TRUE:
475 break;
476 default:
477 assert(false && "implement this part");
478 abort();
479 }
480 as = new_es;
481 return true;
482}
483
484bool AbstractInterpretation::isSwitchBranchFeasible(const SVFVar* var, s64_t succ,
485 AbstractState& as)
486{
487 AbstractState new_es = as;
488 IntervalValue& switch_cond = new_es[var->getId()].getInterval();
489 s64_t value = succ;
490 FIFOWorkList<const SVFStmt*> workList;
491 for (SVFStmt *cmpVarInStmt: var->getInEdges())
492 {
494 }
495 switch_cond.meet_with(IntervalValue(value, value));
496 if (switch_cond.isBottom())
497 {
498 return false;
499 }
500 while(!workList.empty())
501 {
502 const SVFStmt* stmt = workList.pop();
503 if (SVFUtil::isa<CopyStmt>(stmt))
504 {
505 IntervalValue& copy_cond = new_es[var->getId()].getInterval();
506 copy_cond.meet_with(IntervalValue(value, value));
507 }
508 else if (const LoadStmt* load = SVFUtil::dyn_cast<LoadStmt>(stmt))
509 {
510 if (new_es.inVarToAddrsTable(load->getRHSVarID()))
511 {
512 AddressValue &addrs = new_es[load->getRHSVarID()].getAddrs();
513 for (const auto &addr: addrs)
514 {
515 NodeID objId = new_es.getIDFromAddr(addr);
516 if (new_es.inAddrToValTable(objId))
517 {
518 new_es.load(addr).meet_with(IntervalValue(value, value));
519 }
520 }
521 }
522 }
523 }
524 as = new_es;
525 return true;
526}
527
528bool AbstractInterpretation::isBranchFeasible(const IntraCFGEdge* intraEdge,
529 AbstractState& as)
530{
531 const SVFVar *cmpVar = intraEdge->getCondition();
532 if (cmpVar->getInEdges().empty())
533 {
534 return isSwitchBranchFeasible(cmpVar,
535 intraEdge->getSuccessorCondValue(), as);
536 }
537 else
538 {
539 assert(!cmpVar->getInEdges().empty() &&
540 "no in edges?");
541 SVFStmt *cmpVarInStmt = *cmpVar->getInEdges().begin();
542 if (const CmpStmt *cmpStmt = SVFUtil::dyn_cast<CmpStmt>(cmpVarInStmt))
543 {
544 return isCmpBranchFeasible(cmpStmt,
545 intraEdge->getSuccessorCondValue(), as);
546 }
547 else
548 {
549 return isSwitchBranchFeasible(
550 cmpVar, intraEdge->getSuccessorCondValue(), as);
551 }
552 }
553 return true;
554}
556void AbstractInterpretation::handleSingletonWTO(const ICFGSingletonWTO* icfgSingletonWto)
557{
558 const ICFGNode* node = icfgSingletonWto->getICFGNode();
559 stat->getBlockTrace()++;
560
561 std::deque<const ICFGNode*> worklist;
562
564 // handle SVF Stmt
565 for (const SVFStmt *stmt: node->getSVFStmts())
566 {
567 handleSVFStatement(stmt);
568 }
569 // inlining the callee by calling handleFunc for the callee function
570 if (const CallICFGNode* callnode = SVFUtil::dyn_cast<CallICFGNode>(node))
571 {
572 handleCallSite(callnode);
573 }
574 for (auto& detector: detectors)
575 detector->detect(getAbsStateFromTrace(node), node);
577}
578
582void AbstractInterpretation::handleWTOComponents(const std::list<const ICFGWTOComp*>& wtoComps)
583{
584 for (const ICFGWTOComp* wtoNode : wtoComps)
585 {
586 handleWTOComponent(wtoNode);
587 }
588}
589
590void AbstractInterpretation::handleWTOComponent(const ICFGWTOComp* wtoNode)
591{
592 if (const ICFGSingletonWTO* node = SVFUtil::dyn_cast<ICFGSingletonWTO>(wtoNode))
593 {
594 if (mergeStatesFromPredecessors(node->getICFGNode()))
595 handleSingletonWTO(node);
596 }
597 // Handle WTO cycles
598 else if (const ICFGCycleWTO* cycle = SVFUtil::dyn_cast<ICFGCycleWTO>(wtoNode))
599 {
600 if (mergeStatesFromPredecessors(cycle->head()->getICFGNode()))
601 handleCycleWTO(cycle);
602 }
603 // Assert false for unknown WTO types
604 else
605 assert(false && "unknown WTO type!");
606}
607
608void AbstractInterpretation::handleCallSite(const ICFGNode* node)
609{
610 if (const CallICFGNode* callNode = SVFUtil::dyn_cast<CallICFGNode>(node))
611 {
612 if (isExtCall(callNode))
613 {
614 extCallPass(callNode);
615 }
616 else if (isRecursiveCall(callNode))
617 {
618 recursiveCallPass(callNode);
619 }
620 else if (isDirectCall(callNode))
621 {
622 directCallFunPass(callNode);
623 }
624 else if (isIndirectCall(callNode))
625 {
626 indirectCallFunPass(callNode);
627 }
628 else
629 assert(false && "implement this part");
630 }
631 else
632 assert (false && "it is not call node");
633}
634
635bool AbstractInterpretation::isExtCall(const CallICFGNode* callNode)
636{
637 return SVFUtil::isExtCall(callNode->getCalledFunction());
638}
639
640void AbstractInterpretation::extCallPass(const CallICFGNode* callNode)
641{
642 callSiteStack.push_back(callNode);
643 utils->handleExtAPI(callNode);
644 for (auto& detector : detectors)
645 {
646 detector->handleStubFunctions(callNode);
647 }
648 callSiteStack.pop_back();
649}
650
651bool AbstractInterpretation::isRecursiveFun(const FunObjVar* fun)
652{
653 return recursiveFuns.find(fun) != recursiveFuns.end();
654}
655
656bool AbstractInterpretation::isRecursiveCall(const CallICFGNode* callNode)
657{
658 const FunObjVar *callfun = callNode->getCalledFunction();
659 if (!callfun)
660 return false;
661 else
662 return isRecursiveFun(callfun);
663}
664
665void AbstractInterpretation::recursiveCallPass(const CallICFGNode* callNode)
666{
667 AbstractState& as = getAbsStateFromTrace(callNode);
668 SkipRecursiveCall(callNode);
669 const RetICFGNode *retNode = callNode->getRetICFGNode();
670 if (retNode->getSVFStmts().size() > 0)
671 {
672 if (const RetPE *retPE = SVFUtil::dyn_cast<RetPE>(*retNode->getSVFStmts().begin()))
673 {
674 if (!retPE->getLHSVar()->isPointer() &&
675 !retPE->getLHSVar()->isConstDataOrAggDataButNotNullPtr())
676 {
677 as[retPE->getLHSVarID()] = IntervalValue::top();
678 }
679 }
680 }
682}
683
690
691bool AbstractInterpretation::isDirectCall(const CallICFGNode* callNode)
692{
693 const FunObjVar *callfun = callNode->getCalledFunction();
694 if (!callfun)
695 return false;
696 else
697 return !callfun->isDeclaration();
698}
699void AbstractInterpretation::directCallFunPass(const CallICFGNode* callNode)
700{
702
704
705 const FunObjVar *calleeFun = callNode->getCalledFunction();
707 {
708 // If this CallICFGNode is a recursive callsite (i.e. this Node
709 // resides in a recursive function 'fun' and its callee function is
710 // in the same SCC with the fun), then skip it. Since the callee
711 // function is handled during the handling of WTO of the whole recursion.
713 return;
714 }
715 else
716 {
717 // When Options::HandleRecur() == TOP, skipRecursiveCall will handle recursions,
718 // thus should not reach this branch
719 assert(false && "Recursion mode TOP should not reach here!");
720 }
721
722 callSiteStack.push_back(callNode);
723
724 const ICFGWTO* wto = funcToWTO[calleeFun];
725 handleWTOComponents(wto->getWTOComponents());
726
727 callSiteStack.pop_back();
728 // handle Ret node
729 const RetICFGNode *retNode = callNode->getRetICFGNode();
730 // resume ES to callnode
732}
733
739
740void AbstractInterpretation::indirectCallFunPass(const CallICFGNode* callNode)
741{
742 AbstractState& as = getAbsStateFromTrace(callNode);
743 const CallSiteToFunPtrMap& callsiteMaps = svfir->getIndirectCallsites();
744 NodeID call_id = callsiteMaps.at(callNode);
745 if (!as.inVarToAddrsTable(call_id))
746 {
747 return;
748 }
751 SVFVar *func_var = svfir->getGNode(as.getIDFromAddr(addr));
752
753 if(const FunObjVar* funObjVar = SVFUtil::dyn_cast<FunObjVar>(func_var))
754 {
756 {
757 if (isRecursiveCallSite(callNode, funObjVar))
758 return;
759 }
760
761 const FunObjVar* callfun = funObjVar->getFunction();
762 callSiteStack.push_back(callNode);
764
765 const ICFGWTO* wto = funcToWTO[callfun];
766 handleWTOComponents(wto->getWTOComponents());
767 callSiteStack.pop_back();
768 // handle Ret node
769 const RetICFGNode* retNode = callNode->getRetICFGNode();
771 }
772}
773
775void AbstractInterpretation::handleCycleWTO(const ICFGCycleWTO* cycle)
776{
777 const ICFGNode* cycle_head = cycle->head()->getICFGNode();
778 // Flag to indicate if we are in the increasing phase
779 bool increasing = true;
780 // Infinite loop until a fixpoint is reached,
781 for (u32_t cur_iter = 0;; cur_iter++)
782 {
783 // Start widening or narrowing if cur_iter >= widen threshold (widen delay)
784 if (cur_iter >= Options::WidenDelay())
785 {
786 // Widen or narrow after processing cycle head node
788 handleWTOComponent(cycle->head());
790 if (increasing)
791 {
792
793 if (isRecursiveFun(cycle->head()->getICFGNode()->getFun()) &&
796 {
797 // When Options::HandleRecur() == TOP, skipRecursiveCall will handle recursions,
798 // thus should not reach this branch
799 assert(false && "Recursion mode TOP should not reach here!");
800 }
801
802 // Widening
804
806 {
807 increasing = false;
808 continue;
809 }
810 }
811 else
812 {
813 // Narrowing, use different modes for nodes within recursions
814 if (isRecursiveFun(cycle->head()->getICFGNode()->getFun()))
815 {
816 // For nodes in recursions, skip narrowing in WIDEN_TOP mode
818 {
819 break;
820 }
821 // Perform normal narrowing in WIDEN_NARROW mode
823 {
824 // Widening's fixpoint reached in the widening phase, switch to narrowing
827 {
828 // Narrowing's fixpoint reached in the narrowing phase, exit loop
829 break;
830 }
831 }
832 // When Options::HandleRecur() == TOP, skipRecursiveCall will handle recursions,
833 // thus should not reach this branch
834 else
835 {
836 assert(false && "Recursion mode TOP should not reach here");
837 }
838 }
839 // For nodes outside recursions, perform normal narrowing
840 else
841 {
842 // Widening's fixpoint reached in the widening phase, switch to narrowing
845 {
846 // Narrowing's fixpoint reached in the narrowing phase, exit loop
847 break;
848 }
849 }
850 }
851 }
852 else
853 {
854 // Handle the cycle head
855 handleWTOComponent(cycle->head());
856 }
857 // Handle the cycle body
858 handleWTOComponents(cycle->getWTOComponents());
859 }
860}
861
862void AbstractInterpretation::handleSVFStatement(const SVFStmt* stmt)
863{
864 if (const AddrStmt *addr = SVFUtil::dyn_cast<AddrStmt>(stmt))
865 {
866 updateStateOnAddr(addr);
867 }
868 else if (const BinaryOPStmt *binary = SVFUtil::dyn_cast<BinaryOPStmt>(stmt))
869 {
870 updateStateOnBinary(binary);
871 }
872 else if (const CmpStmt *cmp = SVFUtil::dyn_cast<CmpStmt>(stmt))
873 {
874 updateStateOnCmp(cmp);
875 }
876 else if (SVFUtil::isa<UnaryOPStmt>(stmt))
877 {
878 }
879 else if (SVFUtil::isa<BranchStmt>(stmt))
880 {
881 // branch stmt is handled in hasBranchES
882 }
883 else if (const LoadStmt *load = SVFUtil::dyn_cast<LoadStmt>(stmt))
884 {
885 updateStateOnLoad(load);
886 }
887 else if (const StoreStmt *store = SVFUtil::dyn_cast<StoreStmt>(stmt))
888 {
889 updateStateOnStore(store);
890 }
891 else if (const CopyStmt *copy = SVFUtil::dyn_cast<CopyStmt>(stmt))
892 {
893 updateStateOnCopy(copy);
894 }
895 else if (const GepStmt *gep = SVFUtil::dyn_cast<GepStmt>(stmt))
896 {
897 updateStateOnGep(gep);
898 }
899 else if (const SelectStmt *select = SVFUtil::dyn_cast<SelectStmt>(stmt))
900 {
901 updateStateOnSelect(select);
902 }
903 else if (const PhiStmt *phi = SVFUtil::dyn_cast<PhiStmt>(stmt))
904 {
905 updateStateOnPhi(phi);
906 }
907 else if (const CallPE *callPE = SVFUtil::dyn_cast<CallPE>(stmt))
908 {
909 // To handle Call Edge
910 updateStateOnCall(callPE);
911 }
912 else if (const RetPE *retPE = SVFUtil::dyn_cast<RetPE>(stmt))
913 {
914 updateStateOnRet(retPE);
915 }
916 else
917 assert(false && "implement this part");
918 // NullPtr is index 0, it should not be changed
919 assert(!getAbsStateFromTrace(stmt->getICFGNode())[IRGraph::NullPtr].isInterval() &&
920 !getAbsStateFromTrace(stmt->getICFGNode())[IRGraph::NullPtr].isAddr());
921}
922
923void AbstractInterpretation::SkipRecursiveCall(const CallICFGNode* callNode)
924{
926 const RetICFGNode *retNode = callNode->getRetICFGNode();
927 if (retNode->getSVFStmts().size() > 0)
928 {
929 if (const RetPE *retPE = SVFUtil::dyn_cast<RetPE>(*retNode->getSVFStmts().begin()))
930 {
932 if (!retPE->getLHSVar()->isPointer() && !retPE->getLHSVar()->isConstDataOrAggDataButNotNullPtr())
933 as[retPE->getLHSVarID()] = IntervalValue::top();
934 }
935 }
936 if (!retNode->getOutEdges().empty())
937 {
938 if (retNode->getOutEdges().size() == 1)
939 {
940
941 }
942 else
943 {
944 return;
945 }
946 }
949 for (const SVFBasicBlock * bb: callNode->getCalledFunction()->getReachableBBs())
950 {
951 for (const ICFGNode* node: bb->getICFGNodeList())
952 {
953 for (const SVFStmt *stmt: node->getSVFStmts())
954 {
955 if (const StoreStmt *store = SVFUtil::dyn_cast<StoreStmt>(stmt))
956 {
957 const SVFVar *rhsVar = store->getRHSVar();
958 u32_t lhs = store->getLHSVarID();
959 if (as.inVarToAddrsTable(lhs))
960 {
961 if (!rhsVar->isPointer() && !rhsVar->isConstDataOrAggDataButNotNullPtr())
962 {
963 const AbstractValue &addrs = as[lhs];
964 for (const auto &addr: addrs.getAddrs())
965 {
966 as.store(addr, IntervalValue::top());
967 }
968 }
969 }
970 }
971 }
972 }
973 }
974}
975
976// count the size of memory map
978{
979 if (count == 0)
980 {
981 generalNumMap["ES_Var_AVG_Num"] = 0;
982 generalNumMap["ES_Loc_AVG_Num"] = 0;
983 generalNumMap["ES_Var_Addr_AVG_Num"] = 0;
984 generalNumMap["ES_Loc_Addr_AVG_Num"] = 0;
985 }
986 ++count;
987}
988
990{
992 if (count > 0)
993 {
994 generalNumMap["ES_Var_AVG_Num"] /= count;
995 generalNumMap["ES_Loc_AVG_Num"] /= count;
996 generalNumMap["ES_Var_Addr_AVG_Num"] /= count;
997 generalNumMap["ES_Loc_Addr_AVG_Num"] /= count;
998 }
999 generalNumMap["SVF_STMT_NUM"] = count;
1000 generalNumMap["ICFG_Node_Num"] = _ae->svfir->getICFG()->nodeNum;
1001 u32_t callSiteNum = 0;
1002 u32_t extCallSiteNum = 0;
1003 Set<const FunObjVar*> funs;
1004 for (const auto &it: *_ae->svfir->getICFG())
1005 {
1006 if (it.second->getFun())
1007 {
1008 funs.insert(it.second->getFun());
1009 }
1010 if (const CallICFGNode *callNode = dyn_cast<CallICFGNode>(it.second))
1011 {
1012 if (!isExtCall(callNode))
1013 {
1014 callSiteNum++;
1015 }
1016 else
1017 {
1018 extCallSiteNum++;
1019 }
1020 }
1021 }
1022 generalNumMap["Func_Num"] = funs.size();
1023 generalNumMap["EXT_CallSite_Num"] = extCallSiteNum;
1024 generalNumMap["NonEXT_CallSite_Num"] = callSiteNum;
1025 timeStatMap["Total_Time(sec)"] = (double)(endTime - startTime) / TIMEINTERVAL;
1026
1027}
1028
1030{
1031 std::string fullName(_ae->moduleName);
1032 std::string name;
1033 std::string moduleName;
1034 if (fullName.find('/') == std::string::npos)
1035 {
1036 std::string name = fullName;
1037 moduleName = name.substr(0, fullName.find('.'));
1038 }
1039 else
1040 {
1041 std::string name = fullName.substr(fullName.find('/'), fullName.size());
1042 moduleName = name.substr(0, fullName.find('.'));
1043 }
1044
1045 SVFUtil::outs() << "\n************************\n";
1046 SVFUtil::outs() << "################ (program : " << moduleName << ")###############\n";
1047 SVFUtil::outs().flags(std::ios::left);
1048 unsigned field_width = 30;
1049 for (NUMStatMap::iterator it = generalNumMap.begin(), eit = generalNumMap.end(); it != eit; ++it)
1050 {
1051 // format output with the configured field width
1052 std::cout << std::setw(field_width) << it->first << it->second << "\n";
1053 }
1054 SVFUtil::outs() << "-------------------------------------------------------\n";
1055 for (TIMEStatMap::iterator it = timeStatMap.begin(), eit = timeStatMap.end(); it != eit; ++it)
1056 {
1057 // format output with the configured field width
1058 SVFUtil::outs() << std::setw(field_width) << it->first << it->second << "\n";
1059 }
1060 SVFUtil::outs() << "Memory usage: " << memUsage << "\n";
1061
1062 SVFUtil::outs() << "#######################################################" << std::endl;
1063 SVFUtil::outs().flush();
1064}
1065
1067{
1068 // traverse every ICFGNode
1069 Set<std::string> ae_checkpoint_names = {"svf_assert"};
1070 Set<std::string> buf_checkpoint_names = {"UNSAFE_BUFACCESS", "SAFE_BUFACCESS"};
1071 Set<std::string> nullptr_checkpoint_names = {"UNSAFE_LOAD", "SAFE_LOAD"};
1072
1073 for (auto it = svfir->getICFG()->begin(); it != svfir->getICFG()->end(); ++it)
1074 {
1075 const ICFGNode* node = it->second;
1076 if (const CallICFGNode *call = SVFUtil::dyn_cast<CallICFGNode>(node))
1077 {
1078 if (const FunObjVar *fun = call->getCalledFunction())
1079 {
1080 if (ae_checkpoint_names.find(fun->getName()) !=
1081 ae_checkpoint_names.end())
1082 {
1083 checkpoints.insert(call);
1084 }
1085 else if (Options::BufferOverflowCheck())
1086 {
1087 if (buf_checkpoint_names.find(fun->getName()) !=
1088 buf_checkpoint_names.end())
1089 {
1090 checkpoints.insert(call);
1091 }
1092 }
1093 else if (Options::NullDerefCheck())
1094 {
1095 if (nullptr_checkpoint_names.find(fun->getName()) !=
1096 nullptr_checkpoint_names.end())
1097 {
1098 checkpoints.insert(call);
1099 }
1100 }
1101 }
1102 }
1103 }
1104}
1105
1107{
1108 if (checkpoints.size() == 0)
1109 {
1110 return;
1111 }
1112 else
1113 {
1114 SVFUtil::errs() << SVFUtil::errMsg("At least one svf_assert has not been checked!!") << "\n";
1115 for (const CallICFGNode* call: checkpoints)
1116 SVFUtil::errs() << call->toString() + "\n";
1117 assert(false);
1118 }
1119
1120}
1121
1122void AbstractInterpretation::updateStateOnGep(const GepStmt* gep)
1123{
1124 AbstractState& as = getAbsStateFromTrace(gep->getICFGNode());
1125 u32_t rhs = gep->getRHSVarID();
1126 u32_t lhs = gep->getLHSVarID();
1127 IntervalValue offsetPair = as.getElementIndex(gep);
1128 AbstractValue gepAddrs;
1129 APOffset lb = offsetPair.lb().getIntNumeral() < Options::MaxFieldLimit()?
1130 offsetPair.lb().getIntNumeral(): Options::MaxFieldLimit();
1131 APOffset ub = offsetPair.ub().getIntNumeral() < Options::MaxFieldLimit()?
1132 offsetPair.ub().getIntNumeral(): Options::MaxFieldLimit();
1133 for (APOffset i = lb; i <= ub; i++)
1134 gepAddrs.join_with(as.getGepObjAddrs(rhs,IntervalValue(i)));
1135 as[lhs] = gepAddrs;
1136}
1137
1138void AbstractInterpretation::updateStateOnSelect(const SelectStmt* select)
1139{
1140 AbstractState& as = getAbsStateFromTrace(select->getICFGNode());
1141 u32_t res = select->getResID();
1142 u32_t tval = select->getTrueValue()->getId();
1143 u32_t fval = select->getFalseValue()->getId();
1144 u32_t cond = select->getCondition()->getId();
1145 if (as[cond].getInterval().is_numeral())
1146 {
1147 as[res] = as[cond].getInterval().is_zero() ? as[fval] : as[tval];
1148 }
1149 else
1150 {
1151 as[res] = as[tval];
1152 as[res].join_with(as[fval]);
1153 }
1154}
1155
1156void AbstractInterpretation::updateStateOnPhi(const PhiStmt* phi)
1157{
1158 const ICFGNode* icfgNode = phi->getICFGNode();
1159 AbstractState& as = getAbsStateFromTrace(icfgNode);
1160 u32_t res = phi->getResID();
1161 AbstractValue rhs;
1162 for (u32_t i = 0; i < phi->getOpVarNum(); i++)
1163 {
1164 NodeID curId = phi->getOpVarID(i);
1165 const ICFGNode* opICFGNode = phi->getOpICFGNode(i);
1166 if (hasAbsStateFromTrace(opICFGNode))
1167 {
1168 AbstractState& opAs = getAbsStateFromTrace(opICFGNode);
1169 const ICFGEdge* edge = icfg->getICFGEdge(opICFGNode, icfgNode, ICFGEdge::IntraCF);
1171 // if IntraEdge, check the condition, if it is feasible, join the value
1172 // if IntraEdge but not conditional edge, join the value
1173 // if not IntraEdge, join the value
1174 if (edge)
1175 {
1176 const IntraCFGEdge* intraEdge = SVFUtil::cast<IntraCFGEdge>(edge);
1177 if (intraEdge->getCondition())
1178 {
1179 if (isBranchFeasible(intraEdge, opAs))
1180 rhs.join_with(opAs[curId]);
1181 }
1182 else
1183 rhs.join_with(opAs[curId]);
1184 }
1185 else
1186 {
1187 rhs.join_with(opAs[curId]);
1188 }
1189 }
1190 }
1191 as[res] = rhs;
1192}
1193
1194
1195void AbstractInterpretation::updateStateOnCall(const CallPE* callPE)
1196{
1197 AbstractState& as = getAbsStateFromTrace(callPE->getICFGNode());
1198 NodeID lhs = callPE->getLHSVarID();
1199 NodeID rhs = callPE->getRHSVarID();
1200 as[lhs] = as[rhs];
1201}
1202
1203void AbstractInterpretation::updateStateOnRet(const RetPE* retPE)
1204{
1205 AbstractState& as = getAbsStateFromTrace(retPE->getICFGNode());
1206 NodeID lhs = retPE->getLHSVarID();
1207 NodeID rhs = retPE->getRHSVarID();
1208 as[lhs] = as[rhs];
1209}
1210
1211
1212void AbstractInterpretation::updateStateOnAddr(const AddrStmt* addr)
1213{
1214 AbstractState& as = getAbsStateFromTrace(addr->getICFGNode());
1215 as.initObjVar(SVFUtil::cast<ObjVar>(addr->getRHSVar()));
1216 if (addr->getRHSVar()->getType()->getKind() == SVFType::SVFIntegerTy)
1217 as[addr->getRHSVarID()].getInterval().meet_with(utils->getRangeLimitFromType(addr->getRHSVar()->getType()));
1218 as[addr->getLHSVarID()] = as[addr->getRHSVarID()];
1219}
1220
1221
1222void AbstractInterpretation::updateStateOnBinary(const BinaryOPStmt* binary)
1223{
1227 const ICFGNode* node = binary->getICFGNode();
1228 AbstractState& as = getAbsStateFromTrace(node);
1229 u32_t op0 = binary->getOpVarID(0);
1230 u32_t op1 = binary->getOpVarID(1);
1231 u32_t res = binary->getResID();
1232 if (!as.inVarToValTable(op0)) as[op0] = IntervalValue::top();
1233 if (!as.inVarToValTable(op1)) as[op1] = IntervalValue::top();
1234 IntervalValue &lhs = as[op0].getInterval(), &rhs = as[op1].getInterval();
1235 IntervalValue resVal;
1236 switch (binary->getOpcode())
1237 {
1238 case BinaryOPStmt::Add:
1239 case BinaryOPStmt::FAdd:
1240 resVal = (lhs + rhs);
1241 break;
1242 case BinaryOPStmt::Sub:
1243 case BinaryOPStmt::FSub:
1244 resVal = (lhs - rhs);
1245 break;
1246 case BinaryOPStmt::Mul:
1247 case BinaryOPStmt::FMul:
1248 resVal = (lhs * rhs);
1249 break;
1250 case BinaryOPStmt::SDiv:
1251 case BinaryOPStmt::FDiv:
1252 case BinaryOPStmt::UDiv:
1253 resVal = (lhs / rhs);
1254 break;
1255 case BinaryOPStmt::SRem:
1256 case BinaryOPStmt::FRem:
1257 case BinaryOPStmt::URem:
1258 resVal = (lhs % rhs);
1259 break;
1260 case BinaryOPStmt::Xor:
1261 resVal = (lhs ^ rhs);
1262 break;
1263 case BinaryOPStmt::And:
1264 resVal = (lhs & rhs);
1265 break;
1266 case BinaryOPStmt::Or:
1267 resVal = (lhs | rhs);
1268 break;
1269 case BinaryOPStmt::AShr:
1270 resVal = (lhs >> rhs);
1271 break;
1272 case BinaryOPStmt::Shl:
1273 resVal = (lhs << rhs);
1274 break;
1275 case BinaryOPStmt::LShr:
1276 resVal = (lhs >> rhs);
1277 break;
1278 default:
1279 assert(false && "undefined binary: ");
1280 }
1281 as[res] = resVal;
1282}
1283
1284void AbstractInterpretation::updateStateOnCmp(const CmpStmt* cmp)
1285{
1286 AbstractState& as = getAbsStateFromTrace(cmp->getICFGNode());
1287 u32_t op0 = cmp->getOpVarID(0);
1288 u32_t op1 = cmp->getOpVarID(1);
1289 // if it is address
1290 if (as.inVarToAddrsTable(op0) && as.inVarToAddrsTable(op1))
1291 {
1292 IntervalValue resVal;
1293 AddressValue addrOp0 = as[op0].getAddrs();
1294 AddressValue addrOp1 = as[op1].getAddrs();
1295 u32_t res = cmp->getResID();
1296 if (addrOp0.equals(addrOp1))
1297 {
1298 resVal = IntervalValue(1, 1);
1299 }
1300 else if (addrOp0.hasIntersect(addrOp1))
1301 {
1302 resVal = IntervalValue(0, 1);
1303 }
1304 else
1305 {
1306 resVal = IntervalValue(0, 0);
1307 }
1308 as[res] = resVal;
1309 }
1310 // if op0 or op1 is nullptr, compare abstractValue instead of touching addr or interval
1311 else if (op0 == IRGraph::NullPtr || op1 == IRGraph::NullPtr)
1312 {
1313 u32_t res = cmp->getResID();
1314 IntervalValue resVal = (as[op0].equals(as[op1])) ? IntervalValue(1, 1) : IntervalValue(0, 0);
1315 as[res] = resVal;
1316 }
1317 else
1318 {
1319 if (!as.inVarToValTable(op0))
1320 as[op0] = IntervalValue::top();
1321 if (!as.inVarToValTable(op1))
1322 as[op1] = IntervalValue::top();
1323 u32_t res = cmp->getResID();
1324 if (as.inVarToValTable(op0) && as.inVarToValTable(op1))
1325 {
1326 IntervalValue resVal;
1327 if (as[op0].isInterval() && as[op1].isInterval())
1328 {
1329 IntervalValue &lhs = as[op0].getInterval(),
1330 &rhs = as[op1].getInterval();
1331 // AbstractValue
1332 auto predicate = cmp->getPredicate();
1333 switch (predicate)
1334 {
1335 case CmpStmt::ICMP_EQ:
1336 case CmpStmt::FCMP_OEQ:
1337 case CmpStmt::FCMP_UEQ:
1338 resVal = (lhs == rhs);
1339 // resVal = (lhs.getInterval() == rhs.getInterval());
1340 break;
1341 case CmpStmt::ICMP_NE:
1342 case CmpStmt::FCMP_ONE:
1343 case CmpStmt::FCMP_UNE:
1344 resVal = (lhs != rhs);
1345 break;
1346 case CmpStmt::ICMP_UGT:
1347 case CmpStmt::ICMP_SGT:
1348 case CmpStmt::FCMP_OGT:
1349 case CmpStmt::FCMP_UGT:
1350 resVal = (lhs > rhs);
1351 break;
1352 case CmpStmt::ICMP_UGE:
1353 case CmpStmt::ICMP_SGE:
1354 case CmpStmt::FCMP_OGE:
1355 case CmpStmt::FCMP_UGE:
1356 resVal = (lhs >= rhs);
1357 break;
1358 case CmpStmt::ICMP_ULT:
1359 case CmpStmt::ICMP_SLT:
1360 case CmpStmt::FCMP_OLT:
1361 case CmpStmt::FCMP_ULT:
1362 resVal = (lhs < rhs);
1363 break;
1364 case CmpStmt::ICMP_ULE:
1365 case CmpStmt::ICMP_SLE:
1366 case CmpStmt::FCMP_OLE:
1367 case CmpStmt::FCMP_ULE:
1368 resVal = (lhs <= rhs);
1369 break;
1370 case CmpStmt::FCMP_FALSE:
1371 resVal = IntervalValue(0, 0);
1372 break;
1373 case CmpStmt::FCMP_TRUE:
1374 resVal = IntervalValue(1, 1);
1375 break;
1376 default:
1377 assert(false && "undefined compare: ");
1378 }
1379 as[res] = resVal;
1380 }
1381 else if (as[op0].isAddr() && as[op1].isAddr())
1382 {
1383 AddressValue &lhs = as[op0].getAddrs(),
1384 &rhs = as[op1].getAddrs();
1385 auto predicate = cmp->getPredicate();
1386 switch (predicate)
1387 {
1388 case CmpStmt::ICMP_EQ:
1389 case CmpStmt::FCMP_OEQ:
1390 case CmpStmt::FCMP_UEQ:
1391 {
1392 if (lhs.hasIntersect(rhs))
1393 {
1394 resVal = IntervalValue(0, 1);
1395 }
1396 else if (lhs.empty() && rhs.empty())
1397 {
1398 resVal = IntervalValue(1, 1);
1399 }
1400 else
1401 {
1402 resVal = IntervalValue(0, 0);
1403 }
1404 break;
1405 }
1406 case CmpStmt::ICMP_NE:
1407 case CmpStmt::FCMP_ONE:
1408 case CmpStmt::FCMP_UNE:
1409 {
1410 if (lhs.hasIntersect(rhs))
1411 {
1412 resVal = IntervalValue(0, 1);
1413 }
1414 else if (lhs.empty() && rhs.empty())
1415 {
1416 resVal = IntervalValue(0, 0);
1417 }
1418 else
1419 {
1420 resVal = IntervalValue(1, 1);
1421 }
1422 break;
1423 }
1424 case CmpStmt::ICMP_UGT:
1425 case CmpStmt::ICMP_SGT:
1426 case CmpStmt::FCMP_OGT:
1427 case CmpStmt::FCMP_UGT:
1428 {
1429 if (lhs.size() == 1 && rhs.size() == 1)
1430 {
1431 resVal = IntervalValue(*lhs.begin() > *rhs.begin());
1432 }
1433 else
1434 {
1435 resVal = IntervalValue(0, 1);
1436 }
1437 break;
1438 }
1439 case CmpStmt::ICMP_UGE:
1440 case CmpStmt::ICMP_SGE:
1441 case CmpStmt::FCMP_OGE:
1442 case CmpStmt::FCMP_UGE:
1443 {
1444 if (lhs.size() == 1 && rhs.size() == 1)
1445 {
1446 resVal = IntervalValue(*lhs.begin() >= *rhs.begin());
1447 }
1448 else
1449 {
1450 resVal = IntervalValue(0, 1);
1451 }
1452 break;
1453 }
1454 case CmpStmt::ICMP_ULT:
1455 case CmpStmt::ICMP_SLT:
1456 case CmpStmt::FCMP_OLT:
1457 case CmpStmt::FCMP_ULT:
1458 {
1459 if (lhs.size() == 1 && rhs.size() == 1)
1460 {
1461 resVal = IntervalValue(*lhs.begin() < *rhs.begin());
1462 }
1463 else
1464 {
1465 resVal = IntervalValue(0, 1);
1466 }
1467 break;
1468 }
1469 case CmpStmt::ICMP_ULE:
1470 case CmpStmt::ICMP_SLE:
1471 case CmpStmt::FCMP_OLE:
1472 case CmpStmt::FCMP_ULE:
1473 {
1474 if (lhs.size() == 1 && rhs.size() == 1)
1475 {
1476 resVal = IntervalValue(*lhs.begin() <= *rhs.begin());
1477 }
1478 else
1479 {
1480 resVal = IntervalValue(0, 1);
1481 }
1482 break;
1483 }
1484 case CmpStmt::FCMP_FALSE:
1485 resVal = IntervalValue(0, 0);
1486 break;
1487 case CmpStmt::FCMP_TRUE:
1488 resVal = IntervalValue(1, 1);
1489 break;
1490 default:
1491 assert(false && "undefined compare: ");
1492 }
1493 as[res] = resVal;
1494 }
1495 }
1496 }
1497}
1498
1499void AbstractInterpretation::updateStateOnLoad(const LoadStmt* load)
1500{
1501 AbstractState& as = getAbsStateFromTrace(load->getICFGNode());
1502 u32_t rhs = load->getRHSVarID();
1503 u32_t lhs = load->getLHSVarID();
1504 as[lhs] = as.loadValue(rhs);
1505}
1506
1507void AbstractInterpretation::updateStateOnStore(const StoreStmt* store)
1508{
1509 AbstractState& as = getAbsStateFromTrace(store->getICFGNode());
1510 u32_t rhs = store->getRHSVarID();
1511 u32_t lhs = store->getLHSVarID();
1512 as.storeValue(lhs, as[rhs]);
1513}
1514
1515void AbstractInterpretation::updateStateOnCopy(const CopyStmt* copy)
1516{
1517 auto getZExtValue = [](AbstractState& as, const SVFVar* var)
1518 {
1519 const SVFType* type = var->getType();
1520 if (SVFUtil::isa<SVFIntegerType>(type))
1521 {
1522 u32_t bits = type->getByteSize() * 8;
1523 if (as[var->getId()].getInterval().is_numeral())
1524 {
1525 if (bits == 8)
1526 {
1527 int8_t signed_i8_value = as[var->getId()].getInterval().getIntNumeral();
1528 u32_t unsigned_value = static_cast<uint8_t>(signed_i8_value);
1529 return IntervalValue(unsigned_value, unsigned_value);
1530 }
1531 else if (bits == 16)
1532 {
1533 s16_t signed_i16_value = as[var->getId()].getInterval().getIntNumeral();
1534 u32_t unsigned_value = static_cast<u16_t>(signed_i16_value);
1535 return IntervalValue(unsigned_value, unsigned_value);
1536 }
1537 else if (bits == 32)
1538 {
1539 s32_t signed_i32_value = as[var->getId()].getInterval().getIntNumeral();
1540 u32_t unsigned_value = static_cast<u32_t>(signed_i32_value);
1541 return IntervalValue(unsigned_value, unsigned_value);
1542 }
1543 else if (bits == 64)
1544 {
1545 s64_t signed_i64_value = as[var->getId()].getInterval().getIntNumeral();
1546 return IntervalValue(signed_i64_value, signed_i64_value);
1547 // we only support i64 at most
1548 }
1549 else
1550 assert(false && "cannot support int type other than u8/16/32/64");
1551 }
1552 else
1553 {
1554 return IntervalValue::top(); // TODO: may have better solution
1555 }
1556 }
1557 return IntervalValue::top(); // TODO: may have better solution
1558 };
1559
1560 auto getTruncValue = [&](const AbstractState& as, const SVFVar* var,
1561 const SVFType* dstType)
1562 {
1563 const IntervalValue& itv = as[var->getId()].getInterval();
1564 if(itv.isBottom()) return itv;
1565 // get the value of ub and lb
1566 s64_t int_lb = itv.lb().getIntNumeral();
1567 s64_t int_ub = itv.ub().getIntNumeral();
1568 // get dst type
1569 u32_t dst_bits = dstType->getByteSize() * 8;
1570 if (dst_bits == 8)
1571 {
1572 // get the signed value of ub and lb
1573 int8_t s8_lb = static_cast<int8_t>(int_lb);
1574 int8_t s8_ub = static_cast<int8_t>(int_ub);
1575 if (s8_lb > s8_ub)
1576 {
1577 // return range of s8
1578 return IntervalValue(INT8_MIN, INT8_MAX);
1579 }
1580 return IntervalValue(s8_lb, s8_ub);
1581 }
1582 else if (dst_bits == 16)
1583 {
1584 // get the signed value of ub and lb
1585 s16_t s16_lb = static_cast<s16_t>(int_lb);
1586 s16_t s16_ub = static_cast<s16_t>(int_ub);
1587 if (s16_lb > s16_ub)
1588 {
1589 // return range of s16
1590 return IntervalValue(INT16_MIN, INT16_MAX);
1591 }
1592 return IntervalValue(s16_lb, s16_ub);
1593 }
1594 else if (dst_bits == 32)
1595 {
1596 // get the signed value of ub and lb
1597 s32_t s32_lb = static_cast<s32_t>(int_lb);
1598 s32_t s32_ub = static_cast<s32_t>(int_ub);
1599 if (s32_lb > s32_ub)
1600 {
1601 // return range of s32
1602 return IntervalValue(INT32_MIN, INT32_MAX);
1603 }
1604 return IntervalValue(s32_lb, s32_ub);
1605 }
1606 else
1607 {
1608 assert(false && "cannot support dst int type other than u8/16/32");
1609 abort();
1610 }
1611 };
1612
1613 AbstractState& as = getAbsStateFromTrace(copy->getICFGNode());
1614 u32_t lhs = copy->getLHSVarID();
1615 u32_t rhs = copy->getRHSVarID();
1616
1617 if (copy->getCopyKind() == CopyStmt::COPYVAL)
1618 {
1619 as[lhs] = as[rhs];
1620 }
1621 else if (copy->getCopyKind() == CopyStmt::ZEXT)
1622 {
1623 as[lhs] = getZExtValue(as, copy->getRHSVar());
1624 }
1625 else if (copy->getCopyKind() == CopyStmt::SEXT)
1626 {
1627 as[lhs] = as[rhs].getInterval();
1628 }
1629 else if (copy->getCopyKind() == CopyStmt::FPTOSI)
1630 {
1631 as[lhs] = as[rhs].getInterval();
1632 }
1633 else if (copy->getCopyKind() == CopyStmt::FPTOUI)
1634 {
1635 as[lhs] = as[rhs].getInterval();
1636 }
1637 else if (copy->getCopyKind() == CopyStmt::SITOFP)
1638 {
1639 as[lhs] = as[rhs].getInterval();
1640 }
1641 else if (copy->getCopyKind() == CopyStmt::UITOFP)
1642 {
1643 as[lhs] = as[rhs].getInterval();
1644 }
1645 else if (copy->getCopyKind() == CopyStmt::TRUNC)
1646 {
1647 as[lhs] = getTruncValue(as, copy->getRHSVar(), copy->getLHSVar()->getType());
1648 }
1649 else if (copy->getCopyKind() == CopyStmt::FPTRUNC)
1650 {
1651 as[lhs] = as[rhs].getInterval();
1652 }
1653 else if (copy->getCopyKind() == CopyStmt::INTTOPTR)
1654 {
1655 //insert nullptr
1656 }
1657 else if (copy->getCopyKind() == CopyStmt::PTRTOINT)
1658 {
1660 }
1661 else if (copy->getCopyKind() == CopyStmt::BITCAST)
1662 {
1663 if (as[rhs].isAddr())
1664 {
1665 as[lhs] = as[rhs];
1666 }
1667 else
1668 {
1669 // do nothing
1670 }
1671 }
1672 else
1673 assert(false && "undefined copy kind");
1674}
1675
1676
1677