AbstractInterpretation.cpp
//===- AbstractInterpretation.cpp -- Abstract Interpretation -----------------------//
2//
3// SVF: Static Value-Flow Analysis
4//
5// Copyright (C) <2013-> <Yulei Sui>
6//
7
8// This program is free software: you can redistribute it and/or modify
9// it under the terms of the GNU Affero General Public License as published by
10// the Free Software Foundation, either version 3 of the License, or
11// (at your option) any later version.
12
13// This program is distributed in the hope that it will be useful,
14// but WITHOUT ANY WARRANTY; without even the implied warranty of
15// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16// GNU Affero General Public License for more details.
17
18// You should have received a copy of the GNU Affero General Public License
19// along with this program. If not, see <http://www.gnu.org/licenses/>.
20//
21//===----------------------------------------------------------------------===//
22
23
24//
25// Created on: Jan 10, 2024
26// Author: Xiao Cheng, Jiawei Wang
27//
28
#include "AE/Svfexe/AbstractInterpretation.h"
#include "AE/Svfexe/AbsExtAPI.h"
31#include "SVFIR/SVFIR.h"
32#include "Util/Options.h"
33#include "Util/WorkList.h"
34#include "Graphs/CallGraph.h"
35#include "WPA/Andersen.h"
36#include <cmath>
37
38using namespace SVF;
39using namespace SVFUtil;
40using namespace z3;
41
42
void AbstractInterpretation::runOnModule(ICFG* _icfg)
{
45 stat->startClk();
46 icfg = _icfg;
    svfir = SVFIR::getPAG();

    collectCheckPoints();

53 analyse();
    checkPointAllSet();
    stat->endClk();
    if (Options::PStat())
        stat->performStat();
    for (auto& detector: detectors)
60 detector->reportBug();
61}
62
/// Destructor
AbstractInterpretation::~AbstractInterpretation()
{
70 delete stat;
71 for (auto it: funcToWTO)
72 delete it.second;
73}
74
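// Build the weak topological order (WTO) that drives the analysis: in TOP mode an
// intraprocedural ICFGWTO is computed per function, while in the WIDEN_ONLY and
// WIDEN_NARROW modes an interprocedural WTO (IWTO) is computed for each call-graph
// SCC entry, recording recursive functions and non-recursive call sites on the way.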
void AbstractInterpretation::initWTO()
{
    AndersenWaveDiff* ander = AndersenWaveDiff::createAndersenWaveDiff(svfir);
    // Detect if the call graph has cycles by finding its strongly connected components (SCC)
    CallGraphSCC* callGraphScc = ander->getCallGraphSCC();
89 callGraphScc->find();
90 CallGraph* callGraph = ander->getCallGraph();
91
92 // Iterate through the call graph
93 for (auto it = callGraph->begin(); it != callGraph->end(); it++)
94 {
95 // Check if the current function is part of a cycle
96 if (callGraphScc->isInCycle(it->second->getId()))
97 recursiveFuns.insert(it->second->getFunction()); // Mark the function as recursive
98
99 // In TOP mode, calculate the WTO
100 if (Options::HandleRecur() == TOP)
101 {
102 if (it->second->getFunction()->isDeclaration())
103 continue;
104 auto* wto = new ICFGWTO(icfg, icfg->getFunEntryICFGNode(it->second->getFunction()));
105 wto->init();
106 funcToWTO[it->second->getFunction()] = wto;
107 }
        // In WIDEN_ONLY or WIDEN_NARROW mode, calculate the IWTO
        else if (Options::HandleRecur() == WIDEN_ONLY ||
                 Options::HandleRecur() == WIDEN_NARROW)
        {
112 const FunObjVar *fun = it->second->getFunction();
113 if (fun->isDeclaration())
114 continue;
115
116 NodeID repNodeId = callGraphScc->repNode(it->second->getId());
117 auto cgSCCNodes = callGraphScc->subNodes(repNodeId);
118
119 // Identify if this node is an SCC entry (nodes who have incoming edges
120 // from nodes outside the SCC). Also identify non-recursive callsites.
121 bool isEntry = false;
122 if (it->second->getInEdges().empty())
123 isEntry = true;
124 for (auto inEdge: it->second->getInEdges())
125 {
126 NodeID srcNodeId = inEdge->getSrcID();
127 if (!cgSCCNodes.test(srcNodeId))
128 {
129 isEntry = true;
130 const CallICFGNode *callSite = nullptr;
131 if (inEdge->isDirectCallEdge())
132 callSite = *(inEdge->getDirectCalls().begin());
133 else if (inEdge->isIndirectCallEdge())
134 callSite = *(inEdge->getIndirectCalls().begin());
135 else
136 assert(false && "CallGraphEdge must "
137 "be either direct or indirect!");
138
                    nonRecursiveCallSites.insert(
                        {callSite, inEdge->getDstNode()->getFunction()->getId()});
141 }
142 }
143
144 // Compute IWTO for the function partition entered from each partition entry
145 if (isEntry)
146 {
                auto* iwto = new ICFGIWTO(icfg, icfg->getFunEntryICFGNode(fun),
                                          cgSCCNodes, callGraph);
149 iwto->init();
150 funcToWTO[it->second->getFunction()] = iwto;
151 }
152 }
153 else
154 assert(false && "Invalid recursion mode specified!");
155 }
156}
157
void AbstractInterpretation::analyse()
{
161 initWTO();
    // handle Global ICFGNode of SVFModule
    handleGlobalNode();
    getAbsStateFromTrace(icfg->getGlobalICFGNode())[SVFIR::getPAG()->getBlkPtr()] = IntervalValue::top();
166 if (const CallGraphNode* cgn = svfir->getCallGraph()->getCallGraphNode("main"))
167 {
168 const ICFGWTO* wto = funcToWTO[cgn->getFunction()];
169 handleWTOComponents(wto->getWTOComponents());
170 }
171}
172
void AbstractInterpretation::handleGlobalNode()
{
    const ICFGNode* node = icfg->getGlobalICFGNode();
    abstractTrace[node] = AbstractState();
179 // Global Node, we just need to handle addr, load, store, copy and gep
180 for (const SVFStmt *stmt: node->getSVFStmts())
    {
        handleSVFStatement(stmt);
    }
184}
185
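// Join the abstract states propagated along all feasible incoming ICFG edges of
// icfgNode into its pre-state; returns false if no predecessor has a state yet,
// i.e. the node is currently unreachable.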
bool AbstractInterpretation::mergeStatesFromPredecessors(const ICFGNode* icfgNode)
{
    std::vector<AbstractState> workList;
    AbstractState preAs;
193 for (auto& edge: icfgNode->getInEdges())
194 {
195 if (abstractTrace.find(edge->getSrcNode()) != abstractTrace.end())
196 {
197
198 if (const IntraCFGEdge *intraCfgEdge =
199 SVFUtil::dyn_cast<IntraCFGEdge>(edge))
200 {
201 AbstractState tmpEs = abstractTrace[edge->getSrcNode()];
202 if (intraCfgEdge->getCondition())
                {
                    if (isBranchFeasible(intraCfgEdge, tmpEs))
                    {
206 workList.push_back(tmpEs);
207 }
208 else
209 {
210 // do nothing
211 }
212 }
213 else
214 {
215 workList.push_back(tmpEs);
216 }
217 }
218 else if (const CallCFGEdge *callCfgEdge =
219 SVFUtil::dyn_cast<CallCFGEdge>(edge))
220 {
221
222 // context insensitive implementation
223 workList.push_back(
224 abstractTrace[callCfgEdge->getSrcNode()]);
225 }
226 else if (const RetCFGEdge *retCfgEdge =
227 SVFUtil::dyn_cast<RetCFGEdge>(edge))
228 {
229 switch (Options::HandleRecur())
230 {
231 case TOP:
232 {
233 workList.push_back(abstractTrace[retCfgEdge->getSrcNode()]);
234 break;
235 }
236 case WIDEN_ONLY:
237 case WIDEN_NARROW:
238 {
239 const RetICFGNode* returnSite = SVFUtil::dyn_cast<RetICFGNode>(icfgNode);
                const CallICFGNode* callSite = returnSite->getCallICFGNode();
                if (nonRecursiveCallSites.find({callSite, retCfgEdge->getSrcNode()->getFun()->getId()}) !=
                        nonRecursiveCallSites.end())
                    workList.push_back(abstractTrace[retCfgEdge->getSrcNode()]);
243 }
244 }
245 }
246 else
247 assert(false && "Unhandled ICFGEdge type encountered!");
248 }
249 }
250 if (workList.size() == 0)
251 {
252 return false;
253 }
254 else
255 {
256 while (!workList.empty())
257 {
258 preAs.joinWith(workList.back());
259 workList.pop_back();
260 }
261 // Has ES on the in edges - Feasible block
262 // update post as
263 abstractTrace[icfgNode] = preAs;
264 return true;
265 }
266}
267
268
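// Given a branch guarded by a CmpStmt and the successor value taken (succ), refine
// the intervals of the compared operands (and of the memory objects they were loaded
// from); returns false if the refined result becomes bottom, i.e. the branch is
// infeasible under the current abstract state.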
bool AbstractInterpretation::isCmpBranchFeasible(const CmpStmt* cmpStmt, s64_t succ,
        AbstractState& as)
{
    AbstractState new_es = as;
    // get cmp stmt's op0, op1, and predicate
274 NodeID op0 = cmpStmt->getOpVarID(0);
275 NodeID op1 = cmpStmt->getOpVarID(1);
276 NodeID res_id = cmpStmt->getResID();
277 s32_t predicate = cmpStmt->getPredicate();
278 // if op0 or op1 is nullptr, no need to change value, just copy the state
    if (op0 == IRGraph::NullPtr || op1 == IRGraph::NullPtr)
    {
281 as = new_es;
282 return true;
283 }
284 // if op0 or op1 is undefined, return;
285 // skip address compare
286 if (new_es.inVarToAddrsTable(op0) || new_es.inVarToAddrsTable(op1))
287 {
288 as = new_es;
289 return true;
290 }
291 const LoadStmt *load_op0 = nullptr;
292 const LoadStmt *load_op1 = nullptr;
293 // get '%1 = load i32 s', and load inst may not exist
    SVFVar* loadVar0 = svfir->getGNode(op0);
    if (!loadVar0->getInEdges().empty())
296 {
297 SVFStmt *loadVar0InStmt = *loadVar0->getInEdges().begin();
298 if (const LoadStmt *loadStmt = SVFUtil::dyn_cast<LoadStmt>(loadVar0InStmt))
        {
            load_op0 = loadStmt;
        }
302 else if (const CopyStmt *copyStmt = SVFUtil::dyn_cast<CopyStmt>(loadVar0InStmt))
303 {
304 loadVar0 = svfir->getGNode(copyStmt->getRHSVarID());
305 if (!loadVar0->getInEdges().empty())
306 {
307 SVFStmt *loadVar0InStmt2 = *loadVar0->getInEdges().begin();
308 if (const LoadStmt *loadStmt = SVFUtil::dyn_cast<LoadStmt>(loadVar0InStmt2))
                {
                    load_op0 = loadStmt;
                }
312 }
313 }
314 }
315
    SVFVar* loadVar1 = svfir->getGNode(op1);
    if (!loadVar1->getInEdges().empty())
318 {
319 SVFStmt *loadVar1InStmt = *loadVar1->getInEdges().begin();
320 if (const LoadStmt *loadStmt = SVFUtil::dyn_cast<LoadStmt>(loadVar1InStmt))
        {
            load_op1 = loadStmt;
        }
324 else if (const CopyStmt *copyStmt = SVFUtil::dyn_cast<CopyStmt>(loadVar1InStmt))
325 {
326 loadVar1 = svfir->getGNode(copyStmt->getRHSVarID());
327 if (!loadVar1->getInEdges().empty())
328 {
329 SVFStmt *loadVar1InStmt2 = *loadVar1->getInEdges().begin();
330 if (const LoadStmt *loadStmt = SVFUtil::dyn_cast<LoadStmt>(loadVar1InStmt2))
                {
                    load_op1 = loadStmt;
                }
334 }
335 }
336 }
337 // for const X const, we may get concrete resVal instantly
338 // for var X const, we may get [0,1] if the intersection of var and const is not empty set
    IntervalValue resVal = new_es[res_id].getInterval();
    resVal.meet_with(IntervalValue((s64_t)succ, (s64_t)succ));
    // If Var X const generates bottom value, it means this branch path is not feasible.
342 if (resVal.isBottom())
343 {
344 return false;
345 }
346
347 bool b0 = new_es[op0].getInterval().is_numeral();
348 bool b1 = new_es[op1].getInterval().is_numeral();
349
350 // if const X var, we should reverse op0 and op1.
351 if (b0 && !b1)
352 {
353 std::swap(op0, op1);
354 std::swap(load_op0, load_op1);
355 predicate = _switch_lhsrhs_predicate[predicate];
356 }
357 else
358 {
359 // if var X var, we cannot preset the branch condition to infer the intervals of var0,var1
360 if (!b0 && !b1)
361 {
362 as = new_es;
363 return true;
364 }
365 // if const X const, we can instantly get the resVal
366 else if (b0 && b1)
367 {
368 as = new_es;
369 return true;
370 }
371 }
372 // if cmp is 'var X const == false', we should reverse predicate 'var X' const == true'
373 // X' is reverse predicate of X
374 if (succ == 0)
375 {
376 predicate = _reverse_predicate[predicate];
377 }
378 else {}
379 // change interval range according to the compare predicate
380 AddressValue addrs;
381 if(load_op0 && new_es.inVarToAddrsTable(load_op0->getRHSVarID()))
382 addrs = new_es[load_op0->getRHSVarID()].getAddrs();
383
384 IntervalValue &lhs = new_es[op0].getInterval(), &rhs = new_es[op1].getInterval();
385 switch (predicate)
    {
    case CmpStmt::ICMP_EQ:
    case CmpStmt::FCMP_OEQ:
    case CmpStmt::FCMP_UEQ:
    {
        // Var == Const, so [var.lb, var.ub].meet_with(const)
        lhs.meet_with(rhs);
        // if lhs is register value, we should also change its mem obj
394 for (const auto &addr: addrs)
395 {
396 NodeID objId = new_es.getIDFromAddr(addr);
397 if (new_es.inAddrToValTable(objId))
398 {
399 new_es.load(addr).meet_with(rhs);
400 }
401 }
402 break;
    }
    case CmpStmt::ICMP_NE:
    case CmpStmt::FCMP_ONE:
    case CmpStmt::FCMP_UNE:
        // Complement set
        break;
    case CmpStmt::ICMP_UGT:
    case CmpStmt::ICMP_SGT:
    case CmpStmt::FCMP_OGT:
    case CmpStmt::FCMP_UGT:
        // Var > Const, so [var.lb, var.ub].meet_with([Const+1, +INF])
        lhs.meet_with(IntervalValue(rhs.lb() + 1, IntervalValue::plus_infinity()));
415 // if lhs is register value, we should also change its mem obj
416 for (const auto &addr: addrs)
417 {
418 NodeID objId = new_es.getIDFromAddr(addr);
419 if (new_es.inAddrToValTable(objId))
420 {
                new_es.load(addr).meet_with(
                    IntervalValue(rhs.lb() + 1, IntervalValue::plus_infinity()));
            }
424 }
425 break;
    case CmpStmt::ICMP_UGE:
    case CmpStmt::ICMP_SGE:
    case CmpStmt::FCMP_OGE:
    case CmpStmt::FCMP_UGE:
    {
        // Var >= Const, so [var.lb, var.ub].meet_with([Const, +INF])
        lhs.meet_with(IntervalValue(rhs.lb(), IntervalValue::plus_infinity()));
        // if lhs is register value, we should also change its mem obj
434 for (const auto &addr: addrs)
435 {
436 NodeID objId = new_es.getIDFromAddr(addr);
437 if (new_es.inAddrToValTable(objId))
438 {
                new_es.load(addr).meet_with(
                    IntervalValue(rhs.lb(), IntervalValue::plus_infinity()));
            }
442 }
443 break;
444 }
    case CmpStmt::ICMP_ULT:
    case CmpStmt::ICMP_SLT:
    case CmpStmt::FCMP_OLT:
    case CmpStmt::FCMP_ULT:
    {
450 // Var < Const, so [var.lb, var.ub].meet_with([-INF, const.ub-1])
451 lhs.meet_with(IntervalValue(IntervalValue::minus_infinity(), rhs.ub() - 1));
452 // if lhs is register value, we should also change its mem obj
453 for (const auto &addr: addrs)
454 {
455 NodeID objId = new_es.getIDFromAddr(addr);
456 if (new_es.inAddrToValTable(objId))
457 {
                new_es.load(addr).meet_with(
                    IntervalValue(IntervalValue::minus_infinity(), rhs.ub() - 1));
            }
461 }
462 break;
463 }
    case CmpStmt::ICMP_ULE:
    case CmpStmt::ICMP_SLE:
    case CmpStmt::FCMP_OLE:
    case CmpStmt::FCMP_ULE:
    {
        // Var <= Const, so [var.lb, var.ub].meet_with([-INF, const.ub])
        lhs.meet_with(IntervalValue(IntervalValue::minus_infinity(), rhs.ub()));
        // if lhs is register value, we should also change its mem obj
472 for (const auto &addr: addrs)
473 {
474 NodeID objId = new_es.getIDFromAddr(addr);
475 if (new_es.inAddrToValTable(objId))
476 {
                new_es.load(addr).meet_with(
                    IntervalValue(IntervalValue::minus_infinity(), rhs.ub()));
            }
480 }
481 break;
482 }
    case CmpStmt::FCMP_FALSE:
        break;
    case CmpStmt::FCMP_TRUE:
        break;
487 default:
488 assert(false && "implement this part");
489 abort();
490 }
491 as = new_es;
492 return true;
493}
494
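// Refine the switch condition variable (and the memory object it was loaded from)
// to the taken case value succ; returns false if the intersection is empty, i.e.
// the switch target is infeasible.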
bool AbstractInterpretation::isSwitchBranchFeasible(const SVFVar* var, s64_t succ,
        AbstractState& as)
{
    AbstractState new_es = as;
    IntervalValue& switch_cond = new_es[var->getId()].getInterval();
    s64_t value = succ;
    FIFOWorkList<const SVFStmt*> workList;
    for (SVFStmt* cmpVarInStmt: var->getInEdges())
    {
        workList.push(cmpVarInStmt);
    }
506 switch_cond.meet_with(IntervalValue(value, value));
507 if (switch_cond.isBottom())
508 {
509 return false;
510 }
511 while(!workList.empty())
512 {
513 const SVFStmt* stmt = workList.pop();
514 if (SVFUtil::isa<CopyStmt>(stmt))
515 {
516 IntervalValue& copy_cond = new_es[var->getId()].getInterval();
517 copy_cond.meet_with(IntervalValue(value, value));
518 }
519 else if (const LoadStmt* load = SVFUtil::dyn_cast<LoadStmt>(stmt))
520 {
521 if (new_es.inVarToAddrsTable(load->getRHSVarID()))
522 {
523 AddressValue &addrs = new_es[load->getRHSVarID()].getAddrs();
524 for (const auto &addr: addrs)
525 {
526 NodeID objId = new_es.getIDFromAddr(addr);
527 if (new_es.inAddrToValTable(objId))
                    {
                        new_es.load(addr).meet_with(switch_cond);
                    }
531 }
532 }
533 }
534 }
535 as = new_es;
536 return true;
537}
538
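// Decide whether an intra-procedural conditional edge is feasible by inspecting the
// statement that defines the branch condition (a CmpStmt or a switch variable).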
bool AbstractInterpretation::isBranchFeasible(const IntraCFGEdge* intraEdge,
        AbstractState& as)
{
542 const SVFVar *cmpVar = intraEdge->getCondition();
543 if (cmpVar->getInEdges().empty())
544 {
        return isSwitchBranchFeasible(cmpVar,
                                      intraEdge->getSuccessorCondValue(), as);
547 }
548 else
549 {
550 assert(!cmpVar->getInEdges().empty() &&
551 "no in edges?");
552 SVFStmt *cmpVarInStmt = *cmpVar->getInEdges().begin();
553 if (const CmpStmt *cmpStmt = SVFUtil::dyn_cast<CmpStmt>(cmpVarInStmt))
554 {
            return isCmpBranchFeasible(cmpStmt,
                                       intraEdge->getSuccessorCondValue(), as);
557 }
558 else
559 {
            return isSwitchBranchFeasible(
                       cmpVar, intraEdge->getSuccessorCondValue(), as);
562 }
563 }
564 return true;
565}

void AbstractInterpretation::handleSingletonWTO(const ICFGSingletonWTO* icfgSingletonWto)
{
569 const ICFGNode* node = icfgSingletonWto->getICFGNode();
570 stat->getBlockTrace()++;
571
572 std::deque<const ICFGNode*> worklist;
573
575 // handle SVF Stmt
576 for (const SVFStmt *stmt: node->getSVFStmts())
    {
        handleSVFStatement(stmt);
    }
580 // inlining the callee by calling handleFunc for the callee function
581 if (const CallICFGNode* callnode = SVFUtil::dyn_cast<CallICFGNode>(node))
    {
        handleCallSite(callnode);
    }
585 for (auto& detector: detectors)
586 detector->detect(getAbsStateFromTrace(node), node);
    stat->countStateSize();
}
589
593void AbstractInterpretation::handleWTOComponents(const std::list<const ICFGWTOComp*>& wtoComps)
594{
595 for (const ICFGWTOComp* wtoNode : wtoComps)
    {
        handleWTOComponent(wtoNode);
    }
599}
600
void AbstractInterpretation::handleWTOComponent(const ICFGWTOComp* wtoNode)
{
603 if (const ICFGSingletonWTO* node = SVFUtil::dyn_cast<ICFGSingletonWTO>(wtoNode))
604 {
605 if (mergeStatesFromPredecessors(node->getICFGNode()))
606 handleSingletonWTO(node);
607 }
608 // Handle WTO cycles
609 else if (const ICFGCycleWTO* cycle = SVFUtil::dyn_cast<ICFGCycleWTO>(wtoNode))
610 {
611 if (mergeStatesFromPredecessors(cycle->head()->getICFGNode()))
            handleCycleWTO(cycle);
    }
614 // Assert false for unknown WTO types
615 else
616 assert(false && "unknown WTO type!");
617}
618
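// Dispatch a call site to the matching pass: external API calls, recursive calls,
// direct calls, or indirect calls resolved through the points-to information.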
void AbstractInterpretation::handleCallSite(const ICFGNode* node)
{
621 if (const CallICFGNode* callNode = SVFUtil::dyn_cast<CallICFGNode>(node))
622 {
623 if (isExtCall(callNode))
        {
            extCallPass(callNode);
        }
        else if (isRecursiveCall(callNode))
        {
            recursiveCallPass(callNode);
        }
631 else if (isDirectCall(callNode))
        {
            directCallFunPass(callNode);
        }
635 else if (isIndirectCall(callNode))
        {
            indirectCallFunPass(callNode);
        }
639 else
640 assert(false && "implement this part");
641 }
642 else
643 assert (false && "it is not call node");
644}
645
bool AbstractInterpretation::isExtCall(const CallICFGNode* callNode)
{
648 return SVFUtil::isExtCall(callNode->getCalledFunction());
649}
650
void AbstractInterpretation::extCallPass(const CallICFGNode* callNode)
{
    callSiteStack.push_back(callNode);
    utils->handleExtAPI(callNode);
655 for (auto& detector : detectors)
656 {
657 detector->handleStubFunctions(callNode);
658 }
659 callSiteStack.pop_back();
660}
661
bool AbstractInterpretation::isRecursiveFun(const FunObjVar* fun)
{
664 return recursiveFuns.find(fun) != recursiveFuns.end();
665}
666
bool AbstractInterpretation::isRecursiveCall(const CallICFGNode* callNode)
{
669 const FunObjVar *callfun = callNode->getCalledFunction();
670 if (!callfun)
671 return false;
672 else
673 return isRecursiveFun(callfun);
674}
675
void AbstractInterpretation::recursiveCallPass(const CallICFGNode* callNode)
{
    AbstractState& as = getAbsStateFromTrace(callNode);
    SkipRecursiveCall(callNode);
    const RetICFGNode* retNode = callNode->getRetICFGNode();
681 if (retNode->getSVFStmts().size() > 0)
682 {
683 if (const RetPE *retPE = SVFUtil::dyn_cast<RetPE>(*retNode->getSVFStmts().begin()))
684 {
685 if (!retPE->getLHSVar()->isPointer() &&
686 !retPE->getLHSVar()->isConstDataOrAggDataButNotNullPtr())
687 {
688 as[retPE->getLHSVarID()] = IntervalValue::top();
689 }
690 }
691 }
    abstractTrace[retNode] = as;
}
694
701
bool AbstractInterpretation::isDirectCall(const CallICFGNode* callNode)
{
    const FunObjVar* callfun = callNode->getCalledFunction();
705 if (!callfun)
706 return false;
707 else
708 return !callfun->isDeclaration();
709}
void AbstractInterpretation::directCallFunPass(const CallICFGNode* callNode)
{
    AbstractState as = getAbsStateFromTrace(callNode);

    abstractTrace[callNode] = as;

    const FunObjVar* calleeFun = callNode->getCalledFunction();
    if (funcToWTO.find(calleeFun) == funcToWTO.end())
    {
        return;
    }
722
723 callSiteStack.push_back(callNode);
724
725 const ICFGWTO* wto = funcToWTO[calleeFun];
726 handleWTOComponents(wto->getWTOComponents());
727
728 callSiteStack.pop_back();
729 // handle Ret node
730 const RetICFGNode *retNode = callNode->getRetICFGNode();
731 // resume ES to callnode
    abstractTrace[retNode] = abstractTrace[callNode];
}
734
740
void AbstractInterpretation::indirectCallFunPass(const CallICFGNode* callNode)
{
    AbstractState& as = getAbsStateFromTrace(callNode);
    const CallSiteToFunPtrMap& callsiteMaps = svfir->getIndirectCallsites();
    NodeID call_id = callsiteMaps.at(callNode);
    if (!as.inVarToAddrsTable(call_id))
747 {
748 return;
749 }
    AbstractValue Addrs = as[call_id];
    NodeID addr = *Addrs.getAddrs().begin();
    SVFVar* func_var = svfir->getGNode(as.getIDFromAddr(addr));
753
754 if(const FunObjVar* funObjVar = SVFUtil::dyn_cast<FunObjVar>(func_var))
755 {
757 {
758 if (isRecursiveCallSite(callNode, funObjVar))
759 return;
760 }
761
762 const FunObjVar* callfun = funObjVar->getFunction();
763 callSiteStack.push_back(callNode);
765
766 const ICFGWTO* wto = funcToWTO[callfun];
767 handleWTOComponents(wto->getWTOComponents());
768 callSiteStack.pop_back();
769 // handle Ret node
        const RetICFGNode* retNode = callNode->getRetICFGNode();
        abstractTrace[retNode] = abstractTrace[callNode];
    }
773}
774
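// Iterate a WTO cycle (loop) until a fixpoint is reached: plain iteration for an
// initial number of rounds (Options::WidenDelay), then widening on the cycle head,
// followed by narrowing (where enabled) once the widening phase stabilises.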
void AbstractInterpretation::handleCycleWTO(const ICFGCycleWTO* cycle)
{
778 const ICFGNode* cycle_head = cycle->head()->getICFGNode();
779 // Flag to indicate if we are in the increasing phase
780 bool increasing = true;
781 // Infinite loop until a fixpoint is reached,
782 for (u32_t cur_iter = 0;; cur_iter++)
783 {
        // Start widening or narrowing if cur_iter >= widen threshold (widen delay)
        if (cur_iter >= Options::WidenDelay())
        {
            // Widen or narrow after processing cycle head node
            AbstractState prev_head_state = abstractTrace[cycle_head];
            handleWTOComponent(cycle->head());
            AbstractState cur_head_state = abstractTrace[cycle_head];
791 if (increasing)
792 {
793 // Widening, use different modes for nodes within recursions
794 if (isRecursiveFun(cycle->head()->getICFGNode()->getFun()))
795 {
                    // For nodes in recursions, widen to top in WIDEN_ONLY mode
798 {
800 }
801 // Perform normal widening in WIDEN_NARROW mode
803 {
805 }
806 // In TOP mode, skipRecursiveCall will handle recursions,
807 // thus should not reach this branch
808 else
809 {
810 assert(false && "Recursion mode TOP should not reach here!");
811 }
812 }
813 // For nodes outside recursions, perform normal widening
814 else
                {
                    abstractTrace[cycle_head] = prev_head_state.widening(cur_head_state);
                }

                if (abstractTrace[cycle_head] == prev_head_state)
                {
821 increasing = false;
822 continue;
823 }
824 }
825 else
826 {
827 // Narrowing, use different modes for nodes within recursions
828 if (isRecursiveFun(cycle->head()->getICFGNode()->getFun()))
829 {
                    // For nodes in recursions, skip narrowing in WIDEN_ONLY mode
                    if (Options::HandleRecur() == WIDEN_ONLY)
                    {
833 break;
834 }
                    // Perform normal narrowing in WIDEN_NARROW mode
                    else if (Options::HandleRecur() == WIDEN_NARROW)
                    {
                        // Widening's fixpoint reached in the widening phase, switch to narrowing
                        abstractTrace[cycle_head] = prev_head_state.narrowing(cur_head_state);
                        if (abstractTrace[cycle_head] == prev_head_state)
                        {
842 // Narrowing's fixpoint reached in the narrowing phase, exit loop
843 break;
844 }
845 }
846 // In TOP mode, skipRecursiveCall will handle recursions,
847 // thus should not reach this branch
848 else
849 {
850 assert(false && "Recursion mode TOP should not reach here");
851 }
852 }
853 // For nodes outside recursions, perform normal narrowing
854 else
855 {
                    // Widening's fixpoint reached in the widening phase, switch to narrowing
                    abstractTrace[cycle_head] = prev_head_state.narrowing(cur_head_state);
                    if (abstractTrace[cycle_head] == prev_head_state)
                    {
860 // Narrowing's fixpoint reached in the narrowing phase, exit loop
861 break;
862 }
863 }
864 }
865 }
866 else
867 {
868 // Handle the cycle head
869 handleWTOComponent(cycle->head());
870 }
871 // Handle the cycle body
872 handleWTOComponents(cycle->getWTOComponents());
873 }
874}
875
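// Dispatch an SVFStmt to the corresponding updateStateOn* transfer function
// according to its concrete statement kind.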
void AbstractInterpretation::handleSVFStatement(const SVFStmt* stmt)
{
878 if (const AddrStmt *addr = SVFUtil::dyn_cast<AddrStmt>(stmt))
    {
        updateStateOnAddr(addr);
    }
882 else if (const BinaryOPStmt *binary = SVFUtil::dyn_cast<BinaryOPStmt>(stmt))
    {
        updateStateOnBinary(binary);
    }
886 else if (const CmpStmt *cmp = SVFUtil::dyn_cast<CmpStmt>(stmt))
    {
        updateStateOnCmp(cmp);
    }
890 else if (SVFUtil::isa<UnaryOPStmt>(stmt))
891 {
892 }
893 else if (SVFUtil::isa<BranchStmt>(stmt))
894 {
895 // branch stmt is handled in hasBranchES
896 }
897 else if (const LoadStmt *load = SVFUtil::dyn_cast<LoadStmt>(stmt))
898 {
899 updateStateOnLoad(load);
900 }
901 else if (const StoreStmt *store = SVFUtil::dyn_cast<StoreStmt>(stmt))
902 {
903 updateStateOnStore(store);
904 }
905 else if (const CopyStmt *copy = SVFUtil::dyn_cast<CopyStmt>(stmt))
    {
        updateStateOnCopy(copy);
    }
909 else if (const GepStmt *gep = SVFUtil::dyn_cast<GepStmt>(stmt))
    {
        updateStateOnGep(gep);
    }
913 else if (const SelectStmt *select = SVFUtil::dyn_cast<SelectStmt>(stmt))
    {
        updateStateOnSelect(select);
    }
917 else if (const PhiStmt *phi = SVFUtil::dyn_cast<PhiStmt>(stmt))
    {
        updateStateOnPhi(phi);
    }
921 else if (const CallPE *callPE = SVFUtil::dyn_cast<CallPE>(stmt))
922 {
923 // To handle Call Edge
        updateStateOnCall(callPE);
    }
926 else if (const RetPE *retPE = SVFUtil::dyn_cast<RetPE>(stmt))
927 {
928 updateStateOnRet(retPE);
929 }
930 else
931 assert(false && "implement this part");
932 // NullPtr is index 0, it should not be changed
933 assert(!getAbsStateFromTrace(stmt->getICFGNode())[IRGraph::NullPtr].isInterval() &&
934 !getAbsStateFromTrace(stmt->getICFGNode())[IRGraph::NullPtr].isAddr());
935}
936
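// Conservatively model a skipped recursive call: the (non-pointer) return value and
// any non-pointer objects stored inside the skipped callee are set to the top interval.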
void AbstractInterpretation::SkipRecursiveCall(const CallICFGNode* callNode)
{
    AbstractState& as = getAbsStateFromTrace(callNode);
    const RetICFGNode* retNode = callNode->getRetICFGNode();
941 if (retNode->getSVFStmts().size() > 0)
942 {
943 if (const RetPE *retPE = SVFUtil::dyn_cast<RetPE>(*retNode->getSVFStmts().begin()))
944 {
946 if (!retPE->getLHSVar()->isPointer() && !retPE->getLHSVar()->isConstDataOrAggDataButNotNullPtr())
947 as[retPE->getLHSVarID()] = IntervalValue::top();
948 }
949 }
950 if (!retNode->getOutEdges().empty())
951 {
952 if (retNode->getOutEdges().size() == 1)
953 {
954
955 }
956 else
957 {
958 return;
959 }
960 }
963 for (const SVFBasicBlock * bb: callNode->getCalledFunction()->getReachableBBs())
964 {
965 for (const ICFGNode* node: bb->getICFGNodeList())
966 {
967 for (const SVFStmt *stmt: node->getSVFStmts())
968 {
969 if (const StoreStmt *store = SVFUtil::dyn_cast<StoreStmt>(stmt))
970 {
971 const SVFVar *rhsVar = store->getRHSVar();
972 u32_t lhs = store->getLHSVarID();
973 if (as.inVarToAddrsTable(lhs))
974 {
975 if (!rhsVar->isPointer() && !rhsVar->isConstDataOrAggDataButNotNullPtr())
976 {
977 const AbstractValue &addrs = as[lhs];
978 for (const auto &addr: addrs.getAddrs())
979 {
980 as.store(addr, IntervalValue::top());
981 }
982 }
983 }
984 }
985 }
986 }
987 }
988}
989
990// count the size of memory map
void AEStat::countStateSize()
{
993 if (count == 0)
994 {
995 generalNumMap["ES_Var_AVG_Num"] = 0;
996 generalNumMap["ES_Loc_AVG_Num"] = 0;
997 generalNumMap["ES_Var_Addr_AVG_Num"] = 0;
998 generalNumMap["ES_Loc_Addr_AVG_Num"] = 0;
999 }
1000 ++count;
1001}
1002
void AEStat::finializeStat()
{
    memUsage = getMemUsage();
1006 if (count > 0)
1007 {
1008 generalNumMap["ES_Var_AVG_Num"] /= count;
1009 generalNumMap["ES_Loc_AVG_Num"] /= count;
1010 generalNumMap["ES_Var_Addr_AVG_Num"] /= count;
1011 generalNumMap["ES_Loc_Addr_AVG_Num"] /= count;
1012 }
1013 generalNumMap["SVF_STMT_NUM"] = count;
1014 generalNumMap["ICFG_Node_Num"] = _ae->svfir->getICFG()->nodeNum;
    u32_t callSiteNum = 0;
    u32_t extCallSiteNum = 0;
    Set<const FunObjVar*> funs;
    for (const auto &it: *_ae->svfir->getICFG())
1019 {
1020 if (it.second->getFun())
1021 {
1022 funs.insert(it.second->getFun());
1023 }
1024 if (const CallICFGNode *callNode = dyn_cast<CallICFGNode>(it.second))
1025 {
1026 if (!isExtCall(callNode))
1027 {
1028 callSiteNum++;
1029 }
1030 else
            {
                extCallSiteNum++;
            }
1034 }
1035 }
1036 generalNumMap["Func_Num"] = funs.size();
1037 generalNumMap["EXT_CallSite_Num"] = extCallSiteNum;
1038 generalNumMap["NonEXT_CallSite_Num"] = callSiteNum;
1039 timeStatMap["Total_Time(sec)"] = (double)(endTime - startTime) / TIMEINTERVAL;
1040
1041}
1042
void AEStat::performStat()
{
1045 std::string fullName(_ae->moduleName);
1046 std::string name;
1047 std::string moduleName;
1048 if (fullName.find('/') == std::string::npos)
1049 {
1050 std::string name = fullName;
1051 moduleName = name.substr(0, fullName.find('.'));
1052 }
1053 else
1054 {
1055 std::string name = fullName.substr(fullName.find('/'), fullName.size());
1056 moduleName = name.substr(0, fullName.find('.'));
1057 }
1058
1059 SVFUtil::outs() << "\n************************\n";
1060 SVFUtil::outs() << "################ (program : " << moduleName << ")###############\n";
1061 SVFUtil::outs().flags(std::ios::left);
1062 unsigned field_width = 30;
1063 for (NUMStatMap::iterator it = generalNumMap.begin(), eit = generalNumMap.end(); it != eit; ++it)
1064 {
1065 // format out put with width 20 space
1066 std::cout << std::setw(field_width) << it->first << it->second << "\n";
1067 }
1068 SVFUtil::outs() << "-------------------------------------------------------\n";
1069 for (TIMEStatMap::iterator it = timeStatMap.begin(), eit = timeStatMap.end(); it != eit; ++it)
1070 {
1071 // format out put with width 20 space
1072 SVFUtil::outs() << std::setw(field_width) << it->first << it->second << "\n";
1073 }
1074 SVFUtil::outs() << "Memory usage: " << memUsage << "\n";
1075
1076 SVFUtil::outs() << "#######################################################" << std::endl;
1077 SVFUtil::outs().flush();
1078}
1079
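// Collect the call sites of svf_assert (and, when the corresponding checkers are
// enabled, the buffer-overflow / null-dereference stub functions) as checkpoints,
// so that checkPointAllSet() can verify that every checkpoint was visited.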
void AbstractInterpretation::collectCheckPoints()
{
1082 // traverse every ICFGNode
1083 Set<std::string> ae_checkpoint_names = {"svf_assert"};
1084 Set<std::string> buf_checkpoint_names = {"UNSAFE_BUFACCESS", "SAFE_BUFACCESS"};
1085 Set<std::string> nullptr_checkpoint_names = {"UNSAFE_LOAD", "SAFE_LOAD"};
1086
1087 for (auto it = svfir->getICFG()->begin(); it != svfir->getICFG()->end(); ++it)
1088 {
1089 const ICFGNode* node = it->second;
1090 if (const CallICFGNode *call = SVFUtil::dyn_cast<CallICFGNode>(node))
1091 {
1092 if (const FunObjVar *fun = call->getCalledFunction())
1093 {
1094 if (ae_checkpoint_names.find(fun->getName()) !=
1095 ae_checkpoint_names.end())
1096 {
1097 checkpoints.insert(call);
1098 }
                else if (Options::BufferOverflowCheck())
                {
                    if (buf_checkpoint_names.find(fun->getName()) !=
                            buf_checkpoint_names.end())
1103 {
1104 checkpoints.insert(call);
1105 }
1106 }
                else if (Options::NullDerefCheck())
                {
                    if (nullptr_checkpoint_names.find(fun->getName()) !=
                            nullptr_checkpoint_names.end())
1111 {
1112 checkpoints.insert(call);
1113 }
1114 }
1115 }
1116 }
1117 }
1118}
1119
void AbstractInterpretation::checkPointAllSet()
{
1122 if (checkpoints.size() == 0)
1123 {
1124 return;
1125 }
1126 else
1127 {
1128 SVFUtil::errs() << SVFUtil::errMsg("At least one svf_assert has not been checked!!") << "\n";
1129 for (const CallICFGNode* call: checkpoints)
1130 SVFUtil::errs() << call->toString() + "\n";
1131 assert(false);
1132 }
1133
1134}
1135
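// Compute the address set of a gep's result by joining the element addresses for
// every index in the (bounded) offset interval, capped at Options::MaxFieldLimit().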
void AbstractInterpretation::updateStateOnGep(const GepStmt* gep)
{
1138 AbstractState& as = getAbsStateFromTrace(gep->getICFGNode());
1139 u32_t rhs = gep->getRHSVarID();
1140 u32_t lhs = gep->getLHSVarID();
1141 IntervalValue offsetPair = as.getElementIndex(gep);
    AbstractValue gepAddrs;
    APOffset lb = offsetPair.lb().getIntNumeral() < Options::MaxFieldLimit()?
1144 offsetPair.lb().getIntNumeral(): Options::MaxFieldLimit();
1145 APOffset ub = offsetPair.ub().getIntNumeral() < Options::MaxFieldLimit()?
1146 offsetPair.ub().getIntNumeral(): Options::MaxFieldLimit();
1147 for (APOffset i = lb; i <= ub; i++)
1148 gepAddrs.join_with(as.getGepObjAddrs(rhs,IntervalValue(i)));
1149 as[lhs] = gepAddrs;
1150}
1151
void AbstractInterpretation::updateStateOnSelect(const SelectStmt* select)
{
1154 AbstractState& as = getAbsStateFromTrace(select->getICFGNode());
1155 u32_t res = select->getResID();
1156 u32_t tval = select->getTrueValue()->getId();
1157 u32_t fval = select->getFalseValue()->getId();
1158 u32_t cond = select->getCondition()->getId();
1159 if (as[cond].getInterval().is_numeral())
1160 {
1161 as[res] = as[cond].getInterval().is_zero() ? as[fval] : as[tval];
1162 }
1163 else
1164 {
1165 as[res] = as[tval];
1166 as[res].join_with(as[fval]);
1167 }
1168}
1169
void AbstractInterpretation::updateStateOnPhi(const PhiStmt* phi)
{
    const ICFGNode* icfgNode = phi->getICFGNode();
    AbstractState& as = getAbsStateFromTrace(icfgNode);
    u32_t res = phi->getResID();
    AbstractValue rhs;
    for (u32_t i = 0; i < phi->getOpVarNum(); i++)
1177 {
1178 NodeID curId = phi->getOpVarID(i);
1179 const ICFGNode* opICFGNode = phi->getOpICFGNode(i);
        if (hasAbsStateFromTrace(opICFGNode))
        {
            AbstractState& opAs = getAbsStateFromTrace(opICFGNode);
            const ICFGEdge* edge = icfg->getICFGEdge(opICFGNode, icfgNode, ICFGEdge::IntraCF);
            // if IntraEdge, check the condition, if it is feasible, join the value
1186 // if IntraEdge but not conditional edge, join the value
1187 // if not IntraEdge, join the value
1188 if (edge)
1189 {
1190 const IntraCFGEdge* intraEdge = SVFUtil::cast<IntraCFGEdge>(edge);
1191 if (intraEdge->getCondition())
                {
                    if (isBranchFeasible(intraEdge, opAs))
                        rhs.join_with(opAs[curId]);
1195 }
1196 else
1197 rhs.join_with(opAs[curId]);
1198 }
1199 else
1200 {
1201 rhs.join_with(opAs[curId]);
1202 }
1203 }
1204 }
1205 as[res] = rhs;
1206}
1207
1208
void AbstractInterpretation::updateStateOnCall(const CallPE* callPE)
{
1211 AbstractState& as = getAbsStateFromTrace(callPE->getICFGNode());
1212 NodeID lhs = callPE->getLHSVarID();
1213 NodeID rhs = callPE->getRHSVarID();
1214 as[lhs] = as[rhs];
1215}
1216
void AbstractInterpretation::updateStateOnRet(const RetPE* retPE)
{
    AbstractState& as = getAbsStateFromTrace(retPE->getICFGNode());
    NodeID lhs = retPE->getLHSVarID();
1221 NodeID rhs = retPE->getRHSVarID();
1222 as[lhs] = as[rhs];
1223}
1224
1225
void AbstractInterpretation::updateStateOnAddr(const AddrStmt* addr)
{
1228 AbstractState& as = getAbsStateFromTrace(addr->getICFGNode());
1229 as.initObjVar(SVFUtil::cast<ObjVar>(addr->getRHSVar()));
1230 if (addr->getRHSVar()->getType()->getKind() == SVFType::SVFIntegerTy)
1231 as[addr->getRHSVarID()].getInterval().meet_with(utils->getRangeLimitFromType(addr->getRHSVar()->getType()));
1232 as[addr->getLHSVarID()] = as[addr->getRHSVarID()];
1233}
1234
1235
void AbstractInterpretation::updateStateOnBinary(const BinaryOPStmt* binary)
{
    const ICFGNode* node = binary->getICFGNode();
    AbstractState& as = getAbsStateFromTrace(node);
    u32_t op0 = binary->getOpVarID(0);
1244 u32_t op1 = binary->getOpVarID(1);
1245 u32_t res = binary->getResID();
1246 if (!as.inVarToValTable(op0)) as[op0] = IntervalValue::top();
1247 if (!as.inVarToValTable(op1)) as[op1] = IntervalValue::top();
    IntervalValue &lhs = as[op0].getInterval(), &rhs = as[op1].getInterval();
    IntervalValue resVal;
    switch (binary->getOpcode())
1251 {
1252 case BinaryOPStmt::Add:
1253 case BinaryOPStmt::FAdd:
1254 resVal = (lhs + rhs);
1255 break;
1256 case BinaryOPStmt::Sub:
1257 case BinaryOPStmt::FSub:
1258 resVal = (lhs - rhs);
1259 break;
1260 case BinaryOPStmt::Mul:
1261 case BinaryOPStmt::FMul:
1262 resVal = (lhs * rhs);
1263 break;
1264 case BinaryOPStmt::SDiv:
1265 case BinaryOPStmt::FDiv:
1266 case BinaryOPStmt::UDiv:
1267 resVal = (lhs / rhs);
1268 break;
1269 case BinaryOPStmt::SRem:
1270 case BinaryOPStmt::FRem:
1271 case BinaryOPStmt::URem:
1272 resVal = (lhs % rhs);
1273 break;
1274 case BinaryOPStmt::Xor:
1275 resVal = (lhs ^ rhs);
1276 break;
1277 case BinaryOPStmt::And:
1278 resVal = (lhs & rhs);
1279 break;
1280 case BinaryOPStmt::Or:
1281 resVal = (lhs | rhs);
1282 break;
1283 case BinaryOPStmt::AShr:
1284 resVal = (lhs >> rhs);
1285 break;
1286 case BinaryOPStmt::Shl:
1287 resVal = (lhs << rhs);
1288 break;
1289 case BinaryOPStmt::LShr:
1290 resVal = (lhs >> rhs);
1291 break;
1292 default:
1293 assert(false && "undefined binary: ");
1294 }
1295 as[res] = resVal;
1296}
1297
void AbstractInterpretation::updateStateOnCmp(const CmpStmt* cmp)
{
1300 AbstractState& as = getAbsStateFromTrace(cmp->getICFGNode());
1301 u32_t op0 = cmp->getOpVarID(0);
1302 u32_t op1 = cmp->getOpVarID(1);
1303 // if it is address
1304 if (as.inVarToAddrsTable(op0) && as.inVarToAddrsTable(op1))
    {
        IntervalValue resVal;
        AddressValue addrOp0 = as[op0].getAddrs();
1308 AddressValue addrOp1 = as[op1].getAddrs();
1309 u32_t res = cmp->getResID();
1310 if (addrOp0.equals(addrOp1))
1311 {
1312 resVal = IntervalValue(1, 1);
1313 }
1314 else if (addrOp0.hasIntersect(addrOp1))
1315 {
1316 resVal = IntervalValue(0, 1);
1317 }
1318 else
1319 {
1320 resVal = IntervalValue(0, 0);
1321 }
1322 as[res] = resVal;
1323 }
1324 // if op0 or op1 is nullptr, compare abstractValue instead of touching addr or interval
1325 else if (op0 == IRGraph::NullPtr || op1 == IRGraph::NullPtr)
1326 {
1327 u32_t res = cmp->getResID();
1328 IntervalValue resVal = (as[op0].equals(as[op1])) ? IntervalValue(1, 1) : IntervalValue(0, 0);
1329 as[res] = resVal;
1330 }
1331 else
1332 {
        if (!as.inVarToValTable(op0))
            as[op0] = IntervalValue::top();
        if (!as.inVarToValTable(op1))
            as[op1] = IntervalValue::top();
        u32_t res = cmp->getResID();
        if (as.inVarToValTable(op0) && as.inVarToValTable(op1))
        {
            IntervalValue resVal;
            if (as[op0].isInterval() && as[op1].isInterval())
1342 {
1343 IntervalValue &lhs = as[op0].getInterval(),
1344 &rhs = as[op1].getInterval();
1345 // AbstractValue
1346 auto predicate = cmp->getPredicate();
1347 switch (predicate)
1348 {
1349 case CmpStmt::ICMP_EQ:
1350 case CmpStmt::FCMP_OEQ:
1351 case CmpStmt::FCMP_UEQ:
1352 resVal = (lhs == rhs);
1353 // resVal = (lhs.getInterval() == rhs.getInterval());
1354 break;
1355 case CmpStmt::ICMP_NE:
1356 case CmpStmt::FCMP_ONE:
1357 case CmpStmt::FCMP_UNE:
1358 resVal = (lhs != rhs);
1359 break;
1360 case CmpStmt::ICMP_UGT:
1361 case CmpStmt::ICMP_SGT:
1362 case CmpStmt::FCMP_OGT:
1363 case CmpStmt::FCMP_UGT:
1364 resVal = (lhs > rhs);
1365 break;
1366 case CmpStmt::ICMP_UGE:
1367 case CmpStmt::ICMP_SGE:
1368 case CmpStmt::FCMP_OGE:
1369 case CmpStmt::FCMP_UGE:
1370 resVal = (lhs >= rhs);
1371 break;
1372 case CmpStmt::ICMP_ULT:
1373 case CmpStmt::ICMP_SLT:
1374 case CmpStmt::FCMP_OLT:
1375 case CmpStmt::FCMP_ULT:
1376 resVal = (lhs < rhs);
1377 break;
1378 case CmpStmt::ICMP_ULE:
1379 case CmpStmt::ICMP_SLE:
1380 case CmpStmt::FCMP_OLE:
1381 case CmpStmt::FCMP_ULE:
1382 resVal = (lhs <= rhs);
1383 break;
                case CmpStmt::FCMP_FALSE:
                    resVal = IntervalValue(0, 0);
1386 break;
1387 case CmpStmt::FCMP_TRUE:
1388 resVal = IntervalValue(1, 1);
1389 break;
1390 default:
1391 assert(false && "undefined compare: ");
1392 }
1393 as[res] = resVal;
1394 }
1395 else if (as[op0].isAddr() && as[op1].isAddr())
1396 {
1397 AddressValue &lhs = as[op0].getAddrs(),
1398 &rhs = as[op1].getAddrs();
1399 auto predicate = cmp->getPredicate();
1400 switch (predicate)
1401 {
1402 case CmpStmt::ICMP_EQ:
1403 case CmpStmt::FCMP_OEQ:
1404 case CmpStmt::FCMP_UEQ:
1405 {
1406 if (lhs.hasIntersect(rhs))
1407 {
1408 resVal = IntervalValue(0, 1);
1409 }
1410 else if (lhs.empty() && rhs.empty())
1411 {
1412 resVal = IntervalValue(1, 1);
1413 }
1414 else
1415 {
1416 resVal = IntervalValue(0, 0);
1417 }
1418 break;
1419 }
1420 case CmpStmt::ICMP_NE:
1421 case CmpStmt::FCMP_ONE:
1422 case CmpStmt::FCMP_UNE:
1423 {
1424 if (lhs.hasIntersect(rhs))
1425 {
1426 resVal = IntervalValue(0, 1);
1427 }
1428 else if (lhs.empty() && rhs.empty())
1429 {
1430 resVal = IntervalValue(0, 0);
1431 }
1432 else
1433 {
1434 resVal = IntervalValue(1, 1);
1435 }
1436 break;
1437 }
1438 case CmpStmt::ICMP_UGT:
1439 case CmpStmt::ICMP_SGT:
1440 case CmpStmt::FCMP_OGT:
1441 case CmpStmt::FCMP_UGT:
1442 {
1443 if (lhs.size() == 1 && rhs.size() == 1)
1444 {
1445 resVal = IntervalValue(*lhs.begin() > *rhs.begin());
1446 }
1447 else
1448 {
1449 resVal = IntervalValue(0, 1);
1450 }
1451 break;
1452 }
1453 case CmpStmt::ICMP_UGE:
1454 case CmpStmt::ICMP_SGE:
1455 case CmpStmt::FCMP_OGE:
1456 case CmpStmt::FCMP_UGE:
1457 {
1458 if (lhs.size() == 1 && rhs.size() == 1)
1459 {
1460 resVal = IntervalValue(*lhs.begin() >= *rhs.begin());
1461 }
1462 else
1463 {
1464 resVal = IntervalValue(0, 1);
1465 }
1466 break;
1467 }
1468 case CmpStmt::ICMP_ULT:
1469 case CmpStmt::ICMP_SLT:
1470 case CmpStmt::FCMP_OLT:
1471 case CmpStmt::FCMP_ULT:
1472 {
1473 if (lhs.size() == 1 && rhs.size() == 1)
1474 {
1475 resVal = IntervalValue(*lhs.begin() < *rhs.begin());
1476 }
1477 else
1478 {
1479 resVal = IntervalValue(0, 1);
1480 }
1481 break;
1482 }
1483 case CmpStmt::ICMP_ULE:
1484 case CmpStmt::ICMP_SLE:
1485 case CmpStmt::FCMP_OLE:
1486 case CmpStmt::FCMP_ULE:
1487 {
1488 if (lhs.size() == 1 && rhs.size() == 1)
1489 {
1490 resVal = IntervalValue(*lhs.begin() <= *rhs.begin());
1491 }
1492 else
1493 {
1494 resVal = IntervalValue(0, 1);
1495 }
1496 break;
1497 }
                case CmpStmt::FCMP_FALSE:
                    resVal = IntervalValue(0, 0);
1500 break;
1501 case CmpStmt::FCMP_TRUE:
1502 resVal = IntervalValue(1, 1);
1503 break;
1504 default:
1505 assert(false && "undefined compare: ");
1506 }
1507 as[res] = resVal;
1508 }
1509 }
1510 }
1511}
1512
void AbstractInterpretation::updateStateOnLoad(const LoadStmt* load)
{
    AbstractState& as = getAbsStateFromTrace(load->getICFGNode());
    u32_t rhs = load->getRHSVarID();
1517 u32_t lhs = load->getLHSVarID();
1518 as[lhs] = as.loadValue(rhs);
1519}
1520
void AbstractInterpretation::updateStateOnStore(const StoreStmt* store)
{
    AbstractState& as = getAbsStateFromTrace(store->getICFGNode());
    u32_t rhs = store->getRHSVarID();
1525 u32_t lhs = store->getLHSVarID();
1526 as.storeValue(lhs, as[rhs]);
1527}
1528
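// Transfer function for CopyStmt: plain value copies, integer/floating-point casts
// (zext, sext, trunc, fp<->int) and pointer casts are each handled per CopyKind;
// zext and trunc re-interpret the interval at the destination bit-width.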
void AbstractInterpretation::updateStateOnCopy(const CopyStmt* copy)
{
1531 auto getZExtValue = [](AbstractState& as, const SVFVar* var)
1532 {
1533 const SVFType* type = var->getType();
1534 if (SVFUtil::isa<SVFIntegerType>(type))
1535 {
1536 u32_t bits = type->getByteSize() * 8;
1537 if (as[var->getId()].getInterval().is_numeral())
1538 {
1539 if (bits == 8)
1540 {
                    int8_t signed_i8_value = as[var->getId()].getInterval().getIntNumeral();
                    u32_t unsigned_value = static_cast<uint8_t>(signed_i8_value);
                    return IntervalValue(unsigned_value, unsigned_value);
                }
1545 else if (bits == 16)
1546 {
                    s16_t signed_i16_value = as[var->getId()].getInterval().getIntNumeral();
                    u32_t unsigned_value = static_cast<u16_t>(signed_i16_value);
                    return IntervalValue(unsigned_value, unsigned_value);
                }
1551 else if (bits == 32)
1552 {
                    s32_t signed_i32_value = as[var->getId()].getInterval().getIntNumeral();
                    u32_t unsigned_value = static_cast<u32_t>(signed_i32_value);
                    return IntervalValue(unsigned_value, unsigned_value);
                }
1557 else if (bits == 64)
1558 {
                    s64_t signed_i64_value = as[var->getId()].getInterval().getIntNumeral();
                    return IntervalValue(signed_i64_value, signed_i64_value);
                    // we only support i64 at most
1562 }
1563 else
1564 assert(false && "cannot support int type other than u8/16/32/64");
1565 }
1566 else
1567 {
1568 return IntervalValue::top(); // TODO: may have better solution
1569 }
1570 }
1571 return IntervalValue::top(); // TODO: may have better solution
1572 };
1573
1574 auto getTruncValue = [&](const AbstractState& as, const SVFVar* var,
1575 const SVFType* dstType)
1576 {
1577 const IntervalValue& itv = as[var->getId()].getInterval();
1578 if(itv.isBottom()) return itv;
1579 // get the value of ub and lb
        s64_t int_lb = itv.lb().getIntNumeral();
        s64_t int_ub = itv.ub().getIntNumeral();
1582 // get dst type
1583 u32_t dst_bits = dstType->getByteSize() * 8;
1584 if (dst_bits == 8)
1585 {
1586 // get the signed value of ub and lb
1587 int8_t s8_lb = static_cast<int8_t>(int_lb);
1588 int8_t s8_ub = static_cast<int8_t>(int_ub);
1589 if (s8_lb > s8_ub)
1590 {
                // return range of s8
                return IntervalValue(INT8_MIN, INT8_MAX);
            }
1594 return IntervalValue(s8_lb, s8_ub);
1595 }
1596 else if (dst_bits == 16)
1597 {
1598 // get the signed value of ub and lb
1599 s16_t s16_lb = static_cast<s16_t>(int_lb);
1600 s16_t s16_ub = static_cast<s16_t>(int_ub);
1601 if (s16_lb > s16_ub)
1602 {
                // return range of s16
                return IntervalValue(INT16_MIN, INT16_MAX);
            }
1606 return IntervalValue(s16_lb, s16_ub);
1607 }
1608 else if (dst_bits == 32)
1609 {
1610 // get the signed value of ub and lb
1611 s32_t s32_lb = static_cast<s32_t>(int_lb);
1612 s32_t s32_ub = static_cast<s32_t>(int_ub);
1613 if (s32_lb > s32_ub)
1614 {
                // return range of s32
                return IntervalValue(INT32_MIN, INT32_MAX);
            }
1618 return IntervalValue(s32_lb, s32_ub);
1619 }
1620 else
1621 {
1622 assert(false && "cannot support dst int type other than u8/16/32");
1623 abort();
1624 }
1625 };
1626
1627 AbstractState& as = getAbsStateFromTrace(copy->getICFGNode());
1628 u32_t lhs = copy->getLHSVarID();
1629 u32_t rhs = copy->getRHSVarID();
1630
1631 if (copy->getCopyKind() == CopyStmt::COPYVAL)
1632 {
1633 as[lhs] = as[rhs];
1634 }
1635 else if (copy->getCopyKind() == CopyStmt::ZEXT)
1636 {
1637 as[lhs] = getZExtValue(as, copy->getRHSVar());
1638 }
1639 else if (copy->getCopyKind() == CopyStmt::SEXT)
1640 {
1641 as[lhs] = as[rhs].getInterval();
1642 }
1643 else if (copy->getCopyKind() == CopyStmt::FPTOSI)
1644 {
1645 as[lhs] = as[rhs].getInterval();
1646 }
1647 else if (copy->getCopyKind() == CopyStmt::FPTOUI)
1648 {
1649 as[lhs] = as[rhs].getInterval();
1650 }
1651 else if (copy->getCopyKind() == CopyStmt::SITOFP)
1652 {
1653 as[lhs] = as[rhs].getInterval();
1654 }
1655 else if (copy->getCopyKind() == CopyStmt::UITOFP)
1656 {
1657 as[lhs] = as[rhs].getInterval();
1658 }
1659 else if (copy->getCopyKind() == CopyStmt::TRUNC)
1660 {
1661 as[lhs] = getTruncValue(as, copy->getRHSVar(), copy->getLHSVar()->getType());
1662 }
1663 else if (copy->getCopyKind() == CopyStmt::FPTRUNC)
1664 {
1665 as[lhs] = as[rhs].getInterval();
1666 }
1667 else if (copy->getCopyKind() == CopyStmt::INTTOPTR)
1668 {
1669 //insert nullptr
1670 }
1671 else if (copy->getCopyKind() == CopyStmt::PTRTOINT)
    {
        as[lhs] = IntervalValue::top();
    }
1675 else if (copy->getCopyKind() == CopyStmt::BITCAST)
1676 {
1677 if (as[rhs].isAddr())
1678 {
1679 as[lhs] = as[rhs];
1680 }
1681 else
1682 {
1683 // do nothing
1684 }
1685 }
1686 else
1687 assert(false && "undefined copy kind");
1688}
1689
1690
1691