Static Value-Flow Analysis
Loading...
Searching...
No Matches
AbstractInterpretation.cpp
Go to the documentation of this file.
1//===- AbstractExecution.cpp -- Abstract Execution---------------------------------//
2//
3// SVF: Static Value-Flow Analysis
4//
5// Copyright (C) <2013-> <Yulei Sui>
6//
7
8// This program is free software: you can redistribute it and/or modify
9// it under the terms of the GNU Affero General Public License as published by
10// the Free Software Foundation, either version 3 of the License, or
11// (at your option) any later version.
12
13// This program is distributed in the hope that it will be useful,
14// but WITHOUT ANY WARRANTY; without even the implied warranty of
15// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16// GNU Affero General Public License for more details.
17
18// You should have received a copy of the GNU Affero General Public License
19// along with this program. If not, see <http://www.gnu.org/licenses/>.
20//
21//===----------------------------------------------------------------------===//
22
23
24//
25// Created on: Jan 10, 2024
26// Author: Xiao Cheng, Jiawei Wang
27//
28
30#include "AE/Svfexe/AbsExtAPI.h"
31#include "SVFIR/SVFIR.h"
32#include "Util/Options.h"
33#include "Util/WorkList.h"
34#include "Graphs/CallGraph.h"
35#include "WPA/Andersen.h"
36#include <cmath>
37#include <deque>
38
39using namespace SVF;
40using namespace SVFUtil;
41using namespace z3;
42
43
// Driver of the abstract-interpretation pass: start the timer, run the
// analysis, stop the timer, then let every registered detector report bugs.
// NOTE(review): Doxygen-extracted listing — the embedded numeric prefixes are
// the original source line numbers; the jumps (47->50, 51->56, ...) mean
// several lines are elided here. Code left byte-identical; comments only.
45{
46    stat->startClk();
47    icfg = _icfg;
50
51    // Run Andersen's pointer analysis and build WTO
56
59
60    analyse();
62    stat->endClk();
64    if (Options::PStat())
66    for (auto& detector: detectors)
67        detector->reportBug();
68}
69
80
85{
86 std::deque<const FunObjVar*> entryFunctions;
87
88 for (auto it = callGraph->begin(); it != callGraph->end(); ++it)
89 {
90 const CallGraphNode* cgNode = it->second;
91 const FunObjVar* fun = cgNode->getFunction();
92
93 // Skip declarations
94 if (fun->isDeclaration())
95 continue;
96
97 // Entry points are functions without callers (no incoming edges)
98 if (cgNode->getInEdges().empty())
99 {
100 // If main exists, put it first for priority using deque's push_front
101 if (fun->getName() == "main")
102 {
103 entryFunctions.push_front(fun);
104 }
105 else
106 {
107 entryFunctions.push_back(fun);
108 }
109 }
110 }
111
112 return entryFunctions;
113}
114
115
// Entry of the interpretation phase. NOTE(review): the statement that actually
// starts the multi-entry analysis (original source line 120) is elided in this
// extraction; only the surrounding braces and comment remain.
118{
119    // Always use multi-entry analysis from all entry points
121}
122
// Collects all program entry functions and (per the comment below) handles the
// global ICFG node before iterating over the entries. NOTE(review): the global
// node handling (line 137) and the per-entry loop body (lines 140-141) are
// elided in this extraction; code left byte-identical.
127{
128    // Collect all entry point functions
129    std::deque<const FunObjVar*> entryFunctions = collectProgEntryFuns();
130
131    if (entryFunctions.empty())
132    {
133        assert(false && "No entry functions found for analysis");
134        return;
135    }
136    // handle Global ICFGNode of SVFModule
138    for (const FunObjVar* entryFun : entryFunctions)
139    {
142    }
143}
144
// Processes the global ICFG node's statements and initializes BlkPtr (see the
// long design note below). NOTE(review): the per-statement dispatch (line 159)
// and the final BlkPtr assignment (lines 174-175) are elided in this
// extraction; code left byte-identical.
151{
152    const ICFGNode* node = icfg->getGlobalICFGNode();
155
156    // Global Node, we just need to handle addr, load, store, copy and gep
157    for (const SVFStmt *stmt: node->getSVFStmts())
158    {
160    }
161
162    // BlkPtr represents a pointer whose target is statically unknown (e.g., from
163    // int2ptr casts, external function returns, or unmodeled instructions like
164    // AtomicCmpXchg). It should be an address pointing to the BlackHole object
165    // (ID=2), NOT an interval top.
166    //
167    // History: this was originally set to IntervalValue::top() as a quick fix when
168    // the analysis crashed on programs containing uninitialized BlkPtr. However,
169    // BlkPtr is semantically a *pointer* (address domain), not a numeric value
170    // (interval domain). Setting it to interval top broke cross-domain consistency:
171    // the interval domain and address domain gave contradictory information for the
172    // same variable. The correct representation is an AddressValue containing the
173    // BlackHole virtual address, which means "points to unknown memory".
176}
177
// Joins the abstract states flowing along all in-edges of icfgNode into preAs.
// Returns false when no predecessor contributed a feasible state (unreachable
// block); otherwise records the joined state in abstractTrace and returns true.
// NOTE(review): the branch-feasibility check on conditional intra-edges
// (line 196) is elided in this extraction; code left byte-identical.
182{
183    std::vector<AbstractState> workList;
185    for (auto& edge: icfgNode->getInEdges())
186    {
187        if (abstractTrace.find(edge->getSrcNode()) != abstractTrace.end())
188        {
189
190            if (const IntraCFGEdge *intraCfgEdge =
191                    SVFUtil::dyn_cast<IntraCFGEdge>(edge))
192            {
193                AbstractState tmpEs = abstractTrace[edge->getSrcNode()];
194                if (intraCfgEdge->getCondition())
195                {
197                    {
198                        workList.push_back(tmpEs);
199                    }
200                    else
201                    {
202                        // do nothing
203                    }
204                }
205                else
206                {
207                    workList.push_back(tmpEs);
208                }
209            }
210            else if (const CallCFGEdge *callCfgEdge =
211                         SVFUtil::dyn_cast<CallCFGEdge>(edge))
212            {
213
214                // context insensitive implementation
215                workList.push_back(
216                    abstractTrace[callCfgEdge->getSrcNode()]);
217            }
218            else if (const RetCFGEdge *retCfgEdge =
219                         SVFUtil::dyn_cast<RetCFGEdge>(edge))
220            {
221                // Only include return edge if the corresponding callsite was processed
222                // (skipped recursive callsites in WIDEN_ONLY/WIDEN_NARROW won't have state)
223                const RetICFGNode* retNode = SVFUtil::dyn_cast<RetICFGNode>(icfgNode);
224                if (hasAbsStateFromTrace(retNode->getCallICFGNode()))
225                {
226                    workList.push_back(abstractTrace[retCfgEdge->getSrcNode()]);
227                }
228            }
229            else
230                assert(false && "Unhandled ICFGEdge type encountered!");
231        }
232    }
233    if (workList.size() == 0)
234    {
235        return false;
236    }
237    else
238    {
239        while (!workList.empty())
240        {
241            preAs.joinWith(workList.back());
242            workList.pop_back();
243        }
244        // Has ES on the in edges - Feasible block
245        // update post as
246        abstractTrace[icfgNode] = preAs;
247        return true;
248    }
249}
250
251
// Refines interval values along a conditional branch guarded by cmpStmt, given
// the successor condition value `succ` (0 = false edge). Returns false when the
// refined value becomes bottom, i.e. the branch is infeasible; otherwise writes
// the refined state into `as` and returns true.
// NOTE(review): Doxygen extraction elided many lines here (the numeric jumps):
// the nullptr guard (262), load_op0/load_op1 assignments (283/293, 305/315),
// the resVal refinement (323), all switch case labels (370-372, 387-389, etc.)
// and several meet_with argument lines. Code left byte-identical; comments only.
254{
256    // get cmp stmt's op0, op1, and predicate
257    NodeID op0 = cmpStmt->getOpVarID(0);
258    NodeID op1 = cmpStmt->getOpVarID(1);
259    NodeID res_id = cmpStmt->getResID();
260    s32_t predicate = cmpStmt->getPredicate();
261    // if op0 or op1 is nullptr, no need to change value, just copy the state
263    {
264        as = new_es;
265        return true;
266    }
267    // if op0 or op1 is undefined, return;
268    // skip address compare
269    if (new_es.inVarToAddrsTable(op0) || new_es.inVarToAddrsTable(op1))
270    {
271        as = new_es;
272        return true;
273    }
274    const LoadStmt *load_op0 = nullptr;
275    const LoadStmt *load_op1 = nullptr;
276    // get '%1 = load i32 s', and load inst may not exist
277    const SVFVar* loadVar0 = svfir->getSVFVar(op0);
278    if (!loadVar0->getInEdges().empty())
279    {
280        SVFStmt *loadVar0InStmt = *loadVar0->getInEdges().begin();
281        if (const LoadStmt *loadStmt = SVFUtil::dyn_cast<LoadStmt>(loadVar0InStmt))
282        {
284        }
285        else if (const CopyStmt *copyStmt = SVFUtil::dyn_cast<CopyStmt>(loadVar0InStmt))
286        {
287            loadVar0 = svfir->getSVFVar(copyStmt->getRHSVarID());
288            if (!loadVar0->getInEdges().empty())
289            {
290                SVFStmt *loadVar0InStmt2 = *loadVar0->getInEdges().begin();
291                if (const LoadStmt *loadStmt = SVFUtil::dyn_cast<LoadStmt>(loadVar0InStmt2))
292                {
294                }
295            }
296        }
297    }
298
299    const SVFVar* loadVar1 = svfir->getSVFVar(op1);
300    if (!loadVar1->getInEdges().empty())
301    {
302        SVFStmt *loadVar1InStmt = *loadVar1->getInEdges().begin();
303        if (const LoadStmt *loadStmt = SVFUtil::dyn_cast<LoadStmt>(loadVar1InStmt))
304        {
306        }
307        else if (const CopyStmt *copyStmt = SVFUtil::dyn_cast<CopyStmt>(loadVar1InStmt))
308        {
309            loadVar1 = svfir->getSVFVar(copyStmt->getRHSVarID());
310            if (!loadVar1->getInEdges().empty())
311            {
312                SVFStmt *loadVar1InStmt2 = *loadVar1->getInEdges().begin();
313                if (const LoadStmt *loadStmt = SVFUtil::dyn_cast<LoadStmt>(loadVar1InStmt2))
314                {
316                }
317            }
318        }
319    }
320    // for const X const, we may get concrete resVal instantly
321    // for var X const, we may get [0,1] if the intersection of var and const is not empty set
322    IntervalValue resVal = new_es[res_id].getInterval();
324    // If Var X const generates bottom value, it means this branch path is not feasible.
325    if (resVal.isBottom())
326    {
327        return false;
328    }
329
330    bool b0 = new_es[op0].getInterval().is_numeral();
331    bool b1 = new_es[op1].getInterval().is_numeral();
332
333    // if const X var, we should reverse op0 and op1.
334    if (b0 && !b1)
335    {
336        std::swap(op0, op1);
337        std::swap(load_op0, load_op1);
338        predicate = _switch_lhsrhs_predicate[predicate];
339    }
340    else
341    {
342        // if var X var, we cannot preset the branch condition to infer the intervals of var0,var1
343        if (!b0 && !b1)
344        {
345            as = new_es;
346            return true;
347        }
348        // if const X const, we can instantly get the resVal
349        else if (b0 && b1)
350        {
351            as = new_es;
352            return true;
353        }
354    }
355    // if cmp is 'var X const == false', we should reverse predicate 'var X' const == true'
356    // X' is reverse predicate of X
357    if (succ == 0)
358    {
359        predicate = _reverse_predicate[predicate];
360    }
361    else {}
362    // change interval range according to the compare predicate
363    AddressValue addrs;
364    if(load_op0 && new_es.inVarToAddrsTable(load_op0->getRHSVarID()))
365        addrs = new_es[load_op0->getRHSVarID()].getAddrs();
366
367    IntervalValue &lhs = new_es[op0].getInterval(), &rhs = new_es[op1].getInterval();
368    switch (predicate)
369    {
373    {
374        // Var == Const, so [var.lb, var.ub].meet_with(const)
376        // if lhs is register value, we should also change its mem obj
377        for (const auto &addr: addrs)
378        {
379            NodeID objId = new_es.getIDFromAddr(addr);
380            if (new_es.inAddrToValTable(objId))
381            {
382                new_es.load(addr).meet_with(rhs);
383            }
384        }
385        break;
386    }
390        // Compliment set
391        break;
396        // Var > Const, so [var.lb, var.ub].meet_with([Const+1, +INF])
397        lhs.meet_with(IntervalValue(rhs.lb() + 1, IntervalValue::plus_infinity()));
398        // if lhs is register value, we should also change its mem obj
399        for (const auto &addr: addrs)
400        {
401            NodeID objId = new_es.getIDFromAddr(addr);
402            if (new_es.inAddrToValTable(objId))
403            {
404                new_es.load(addr).meet_with(
406            }
407        }
408        break;
413    {
414        // Var >= Const, so [var.lb, var.ub].meet_with([Const, +INF])
416        // if lhs is register value, we should also change its mem obj
417        for (const auto &addr: addrs)
418        {
419            NodeID objId = new_es.getIDFromAddr(addr);
420            if (new_es.inAddrToValTable(objId))
421            {
422                new_es.load(addr).meet_with(
424            }
425        }
426        break;
427    }
432    {
433        // Var < Const, so [var.lb, var.ub].meet_with([-INF, const.ub-1])
434        lhs.meet_with(IntervalValue(IntervalValue::minus_infinity(), rhs.ub() - 1));
435        // if lhs is register value, we should also change its mem obj
436        for (const auto &addr: addrs)
437        {
438            NodeID objId = new_es.getIDFromAddr(addr);
439            if (new_es.inAddrToValTable(objId))
440            {
441                new_es.load(addr).meet_with(
443            }
444        }
445        break;
446    }
451    {
452        // Var <= Const, so [var.lb, var.ub].meet_with([-INF, const.ub])
454        // if lhs is register value, we should also change its mem obj
455        for (const auto &addr: addrs)
456        {
457            NodeID objId = new_es.getIDFromAddr(addr);
458            if (new_es.inAddrToValTable(objId))
459            {
460                new_es.load(addr).meet_with(
462            }
463        }
464        break;
465    }
467        break;
469        break;
470    default:
471        assert(false && "implement this part");
472        abort();
473    }
474    as = new_es;
475    return true;
476}
477
// Refines the switch condition variable to the single successor value `succ`
// and propagates the refinement backwards through copy/load definitions.
// Returns false when the meet produces bottom (case edge infeasible).
// NOTE(review): the worklist declaration/seeding (lines 484, 487-488) and the
// store-refinement inside the load branch (line 512) are elided in this
// extraction; code left byte-identical.
480{
482    IntervalValue& switch_cond = new_es[var->getId()].getInterval();
483    s64_t value = succ;
485    for (SVFStmt *cmpVarInStmt: var->getInEdges())
486    {
488    }
489    switch_cond.meet_with(IntervalValue(value, value));
490    if (switch_cond.isBottom())
491    {
492        return false;
493    }
494    while(!workList.empty())
495    {
496        const SVFStmt* stmt = workList.pop();
497        if (SVFUtil::isa<CopyStmt>(stmt))
498        {
499            IntervalValue& copy_cond = new_es[var->getId()].getInterval();
500            copy_cond.meet_with(IntervalValue(value, value));
501        }
502        else if (const LoadStmt* load = SVFUtil::dyn_cast<LoadStmt>(stmt))
503        {
504            if (new_es.inVarToAddrsTable(load->getRHSVarID()))
505            {
506                AddressValue &addrs = new_es[load->getRHSVarID()].getAddrs();
507                for (const auto &addr: addrs)
508                {
509                    NodeID objId = new_es.getIDFromAddr(addr);
510                    if (new_es.inAddrToValTable(objId))
511                    {
513                    }
514                }
515            }
516        }
517    }
518    as = new_es;
519    return true;
520}
521
// Dispatches branch-feasibility checking for a conditional intra-edge: the
// condition defined by a CmpStmt goes to the cmp handler, anything else to the
// switch handler. NOTE(review): the actual dispatched calls (lines 528, 538,
// 543) are elided in this extraction; code left byte-identical.
524{
525    const SVFVar *cmpVar = intraEdge->getCondition();
526    if (cmpVar->getInEdges().empty())
527    {
529            intraEdge->getSuccessorCondValue(), as);
530    }
531    else
532    {
533        assert(!cmpVar->getInEdges().empty() &&
534               "no in edges?");
535        SVFStmt *cmpVarInStmt = *cmpVar->getInEdges().begin();
536        if (const CmpStmt *cmpStmt = SVFUtil::dyn_cast<CmpStmt>(cmpVarInStmt))
537        {
539                intraEdge->getSuccessorCondValue(), as);
540        }
541        else
542        {
544                cmpVar, intraEdge->getSuccessorCondValue(), as);
545        }
546    }
547    return true;
548}
// Processes a single (non-cycle) WTO node: merge predecessor states (elided
// line 557), evaluate the node's SVF statements, inline calls, then run all
// detectors on the post-state. NOTE(review): statement handling (561) and the
// callsite handling (566) are elided in this extraction; code left
// byte-identical.
551{
552    const ICFGNode* node = icfgSingletonWto->getICFGNode();
553    stat->getBlockTrace()++;
554
555    std::deque<const ICFGNode*> worklist;
556
558    // handle SVF Stmt
559    for (const SVFStmt *stmt: node->getSVFStmts())
560    {
562    }
563    // inlining the callee by calling handleFunc for the callee function
564    if (const CallICFGNode* callnode = SVFUtil::dyn_cast<CallICFGNode>(node))
565    {
567    }
568    for (auto& detector: detectors)
569        detector->detect(getAbsStateFromTrace(node), node);
571}
572
// Processes one ICFG node for the multi-entry analysis and reports whether its
// abstract state changed (true = successors must be (re)processed).
// For function entries it seeds the state from predecessors or from the global
// node; for other nodes it merges predecessor states and bails out if none are
// feasible. NOTE(review): several lines are elided in this extraction (the
// prevState declaration 580-581, the merge calls 590/607, the global-state
// copies 596/600, statement and callsite handling 617/623); code left
// byte-identical.
578{
579    // Store the previous state for fixpoint detection
582    if (hadPrevState)
583        prevState = abstractTrace[node];
584
585    // For function entry nodes, initialize state from predecessors or global
586    bool isFunEntry = SVFUtil::isa<FunEntryICFGNode>(node);
587    if (isFunEntry)
588    {
589        // Try to merge from predecessors first (handles call edges)
591        {
592            // No predecessors with state - inherit from global node
595            {
597            }
598            else
599            {
601            }
602        }
603    }
604    else
605    {
606        // Merge states from predecessors
608            return false;
609    }
610
611    stat->getBlockTrace()++;
613
614    // Handle SVF statements
615    for (const SVFStmt *stmt: node->getSVFStmts())
616    {
618    }
619
620    // Handle call sites
621    if (const CallICFGNode* callNode = SVFUtil::dyn_cast<CallICFGNode>(node))
622    {
624    }
625
626    // Run detectors
627    for (auto& detector: detectors)
628        detector->detect(getAbsStateFromTrace(node), node);
630
631    // Track this node as analyzed (for coverage statistics across all entry points)
632    allAnalyzedNodes.insert(node);
633
634    // Check if state changed (for fixpoint detection)
635    // For entry nodes on first visit, always return true to process successors
636    if (isFunEntry && !hadPrevState)
637        return true;
638
639    if (abstractTrace[node] == prevState)
640        return false;
641
642    return true;
643}
644
649std::vector<const ICFGNode*> AbstractInterpretation::getNextNodes(const ICFGNode* node) const
650{
651 std::vector<const ICFGNode*> outEdges;
652 for (const ICFGEdge* edge : node->getOutEdges())
653 {
654 const ICFGNode* dst = edge->getDstNode();
655 // Only nodes inside the same function are included
656 if (dst->getFun() == node->getFun())
657 {
658 outEdges.push_back(dst);
659 }
660 }
661 if (const CallICFGNode* callNode = SVFUtil::dyn_cast<CallICFGNode>(node))
662 {
663 // Shortcut to the RetICFGNode
664 const ICFGNode* retNode = callNode->getRetICFGNode();
665 outEdges.push_back(retNode);
666 }
667 return outEdges;
668}
669
/// Computes the exit successors of a WTO cycle: successors of the head and of
/// each singleton component that lie outside the cycle's own node set.
/// NOTE(review): the declaration of `cycleNodes` (original source line 676,
/// presumably a Set of ICFGNode*) is elided in this extraction; code left
/// byte-identical.
std::vector<const ICFGNode*> AbstractInterpretation::getNextNodesOfCycle(const ICFGCycleWTO* cycle) const
{
    // Insert the head of the cycle and all component nodes
    cycleNodes.insert(cycle->head()->getICFGNode());
    for (const ICFGWTOComp* comp : cycle->getWTOComponents())
    {
        if (const ICFGSingletonWTO* singleton = SVFUtil::dyn_cast<ICFGSingletonWTO>(comp))
        {
            cycleNodes.insert(singleton->getICFGNode());
        }
        else if (const ICFGCycleWTO* subCycle = SVFUtil::dyn_cast<ICFGCycleWTO>(comp))
        {
            cycleNodes.insert(subCycle->head()->getICFGNode());
        }
    }

    std::vector<const ICFGNode*> outEdges;

    // Check head's successors
    std::vector<const ICFGNode*> nextNodes = getNextNodes(cycle->head()->getICFGNode());
    for (const ICFGNode* nextNode : nextNodes)
    {
        // Only nodes that point outside the cycle are included
        if (cycleNodes.find(nextNode) == cycleNodes.end())
        {
            outEdges.push_back(nextNode);
        }
    }

    // Check each component's successors
    for (const ICFGWTOComp* comp : cycle->getWTOComponents())
    {
        if (const ICFGSingletonWTO* singleton = SVFUtil::dyn_cast<ICFGSingletonWTO>(comp))
        {
            std::vector<const ICFGNode*> compNextNodes = getNextNodes(singleton->getICFGNode());
            for (const ICFGNode* nextNode : compNextNodes)
            {
                if (cycleNodes.find(nextNode) == cycleNodes.end())
                {
                    outEdges.push_back(nextNode);
                }
            }
        }
        // Skip inner cycles - they are handled within the outer cycle
    }
    return outEdges;
}
722
// Walks the WTO of the function containing `funEntry` in order, handling each
// singleton node directly and each cycle via the (elided) cycle handler.
// NOTE(review): the cycle-handling call (line 748) is elided in this
// extraction; code left byte-identical.
730{
731    auto it = preAnalysis->getFuncToWTO().find(funEntry->getFun());
732    if (it == preAnalysis->getFuncToWTO().end())
733        return;
734
735    // Push all top-level WTO components into the worklist in WTO order
736    FIFOWorkList<const ICFGWTOComp*> worklist(it->second->getWTOComponents());
737
738    while (!worklist.empty())
739    {
740        const ICFGWTOComp* comp = worklist.pop();
741
742        if (const ICFGSingletonWTO* singleton = SVFUtil::dyn_cast<ICFGSingletonWTO>(comp))
743        {
744            handleICFGNode(singleton->getICFGNode());
745        }
746        else if (const ICFGCycleWTO* cycle = SVFUtil::dyn_cast<ICFGCycleWTO>(comp))
747        {
749        }
750    }
751}
752
753
// Dispatches a call node either to external-call handling or to user-function
// call handling; asserts if the node is not a call at all. NOTE(review): both
// dispatched calls (lines 760 and 765) are elided in this extraction; code
// left byte-identical.
755{
756    if (const CallICFGNode* callNode = SVFUtil::dyn_cast<CallICFGNode>(node))
757    {
758        if (isExtCall(callNode))
759        {
761        }
762        else
763        {
764            // Handle both direct and indirect calls uniformly
766        }
767    }
768    else
769        assert (false && "it is not call node");
770}
771
773{
774 return SVFUtil::isExtCall(callNode->getCalledFunction());
775}
776
// Gives every registered detector a chance to process checkpoint/stub
// functions (svf_assert-style calls) at this callsite. NOTE(review): one line
// (779) is elided before the loop in this extraction; code left byte-identical.
778{
780    for (auto& detector : detectors)
781    {
782        detector->handleStubFunctions(callNode);
783    }
784}
785
791
// Sets the (non-pointer) return value of this callsite to interval TOP in the
// abstract state — used when the callee's effect is not inlined (the lead-in
// lines 792-796 that bind `as` are elided in this extraction; presumably it is
// the state at the call/ret node — TODO confirm). Code left byte-identical.
794{
797    const RetICFGNode *retNode = callNode->getRetICFGNode();
798    if (retNode->getSVFStmts().size() > 0)
799    {
800        if (const RetPE *retPE = SVFUtil::dyn_cast<RetPE>(*retNode->getSVFStmts().begin()))
801        {
802            if (!retPE->getLHSVar()->isPointer() &&
803                !retPE->getLHSVar()->isConstDataOrAggDataButNotNullPtr())
804            {
805                as[retPE->getLHSVarID()] = IntervalValue::top();
806            }
807        }
808    }
810}
811
819
// Resolves the callee of a callsite: a direct call returns its function
// immediately; an indirect call is resolved through the abstract address of
// the function pointer, returning nullptr when unresolved. NOTE(review): the
// lines binding `as` and extracting the first address (828, 834, 837, 841,
// 845) are elided in this extraction; code left byte-identical.
822{
823    // Direct call: get callee directly from call node
824    if (const FunObjVar* callee = callNode->getCalledFunction())
825        return callee;
826
827    // Indirect call: resolve callee through pointer analysis
829    auto it = callsiteMaps.find(callNode);
830    if (it == callsiteMaps.end())
831        return nullptr;
832
833    NodeID call_id = it->second;
835        return nullptr;
836
838    if (!as.inVarToAddrsTable(call_id))
839        return nullptr;
840
842    if (Addrs.getAddrs().empty())
843        return nullptr;
844
846    const SVFVar* func_var = svfir->getSVFVar(as.getIDFromAddr(addr));
847    return SVFUtil::dyn_cast<FunObjVar>(func_var);
848}
849
// Decides whether a callsite should be skipped (not inlined): never for
// non-recursive callees; for recursive functions only recursive callsites
// inside the same SCC are skipped. NOTE(review): the callee lookup (853), the
// non-recursive test (858) and the final SCC membership test (865) are elided
// in this extraction; code left byte-identical.
852{
854    if (!callee)
855        return false;

857    // Non-recursive function: never skip, always inline
859        return false;

861    // For recursive functions, skip only recursive callsites (within same SCC).
862    // Entry calls (from outside SCC) are not skipped - they are inlined so that
863    // handleLoopOrRecursion() can analyze the function body.
864    // This applies uniformly to all modes (TOP/WIDEN_ONLY/WIDEN_NARROW).
866}
867
870{
871 // Non-recursive functions (regular loops): always apply narrowing
872 if (!isRecursiveFun(fun))
873 return true;
874
875 // Recursive functions: WIDEN_NARROW applies narrowing, WIDEN_ONLY does not
876 // TOP mode exits early in handleLoopOrRecursion, so should not reach here
877 switch (Options::HandleRecur())
878 {
879 case TOP:
880 assert(false && "TOP mode should not reach narrowing phase for recursive functions");
881 return false;
882 case WIDEN_ONLY:
883 return false; // Skip narrowing for recursive functions
884 case WIDEN_NARROW:
885 return true; // Apply narrowing for recursive functions
886 default:
887 assert(false && "Unknown recursion handling mode");
888 return false;
889 }
890}
// Inlines the callee(s) of a non-external callsite: direct calls are inlined
// straight away; indirect calls fan out over all targets resolved by
// Andersen's call graph. NOTE(review): the skip check (906), the direct-call
// inlining (912-915), the indirect-target collection (922-930) and the final
// ret-node handling (934) are elided in this extraction; code left
// byte-identical.
901{
904
905    // Skip recursive callsites (within SCC); entry calls are not skipped
907        return;
908
909    // Direct call: callee is known
910    if (const FunObjVar* callee = callNode->getCalledFunction())
911    {
914        const RetICFGNode* retNode = callNode->getRetICFGNode();
916        return;
917    }
918
919    // Indirect call: use Andersen's call graph to get all resolved callees.
920    // The call graph was built during PreAnalysis::initWTO() by running Andersen's pointer analysis,
921    // which over-approximates the set of possible targets for each indirect callsite.
923    {
925        for (const FunObjVar* callee : callees)
926        {
927            if (callee->isDeclaration())
928                continue;
931        }
932    }
933    const RetICFGNode* retNode = callNode->getRetICFGNode();
935}
936
// Analyzes a WTO cycle (loop or recursion) to a fixpoint: in TOP mode
// recursive cycles are abstracted via handleRecursiveCall; otherwise the cycle
// head is iterated with widening after `widen_delay` iterations and then
// (mode-permitting) narrowing until stabilization. NOTE(review): the widening
// delay constant (994), head-state snapshots (999-1005), the widening/
// narrowing applications and their fixpoint tests (1013-1015, 1031-1032), the
// recursive-call handling (987) and nested-cycle recursion (1050) are elided
// in this extraction; code left byte-identical.
978{
979    const ICFGNode* cycle_head = cycle->head()->getICFGNode();
980
981    // TOP mode for recursive function cycles: use handleRecursiveCall to set
982    // all stores and return value to TOP, maintaining original semantics
983    if (Options::HandleRecur() == TOP && isRecursiveFun(cycle_head->getFun()))
984    {
985        if (caller)
986        {
988        }
989        return;
990    }
991
992    // WIDEN_ONLY / WIDEN_NARROW modes (and regular loops): iterate until fixpoint
993    bool increasing = true;
995
996    for (u32_t cur_iter = 0;; cur_iter++)
997    {
998        // Get the abstract state before processing the cycle head
1002
1003        // Process the cycle head node
1006
1007        // Start widening or narrowing if cur_iter >= widen delay threshold
1008        if (cur_iter >= widen_delay)
1009        {
1010            if (increasing)
1011            {
1012                // Apply widening operator
1014
1016                {
1017                    // Widening fixpoint reached, switch to narrowing phase
1018                    increasing = false;
1019                    continue;
1020                }
1021            }
1022            else
1023            {
1024                // Narrowing phase - check if narrowing should be applied
1025                if (!shouldApplyNarrowing(cycle_head->getFun()))
1026                {
1027                    break;
1028                }
1029
1030                // Apply narrowing
1033                {
1034                    // Narrowing fixpoint reached, exit loop
1035                    break;
1036                }
1037            }
1038        }
1039
1040        // Process cycle body components
1041        for (const ICFGWTOComp* comp : cycle->getWTOComponents())
1042        {
1043            if (const ICFGSingletonWTO* singleton = SVFUtil::dyn_cast<ICFGSingletonWTO>(comp))
1044            {
1045                handleICFGNode(singleton->getICFGNode());
1046            }
1047            else if (const ICFGCycleWTO* subCycle = SVFUtil::dyn_cast<ICFGCycleWTO>(comp))
1048            {
1049                // Handle nested cycle recursively
1051            }
1052        }
1053    }
1054}
1055
// Dispatches one SVF statement to its transfer function by statement kind,
// then asserts that the NullPtr variable (index 0) was not given a value.
// NOTE(review): several dispatched calls (addr/binary/cmp/copy/gep/select/phi/
// callPE updates on lines 1060, 1064, 1068, 1087, 1091, 1095, 1099, 1104) are
// elided in this extraction; code left byte-identical.
1057{
1058    if (const AddrStmt *addr = SVFUtil::dyn_cast<AddrStmt>(stmt))
1059    {
1061    }
1062    else if (const BinaryOPStmt *binary = SVFUtil::dyn_cast<BinaryOPStmt>(stmt))
1063    {
1065    }
1066    else if (const CmpStmt *cmp = SVFUtil::dyn_cast<CmpStmt>(stmt))
1067    {
1069    }
1070    else if (SVFUtil::isa<UnaryOPStmt>(stmt))
1071    {
1072    }
1073    else if (SVFUtil::isa<BranchStmt>(stmt))
1074    {
1075        // branch stmt is handled in hasBranchES
1076    }
1077    else if (const LoadStmt *load = SVFUtil::dyn_cast<LoadStmt>(stmt))
1078    {
1079        updateStateOnLoad(load);
1080    }
1081    else if (const StoreStmt *store = SVFUtil::dyn_cast<StoreStmt>(stmt))
1082    {
1083        updateStateOnStore(store);
1084    }
1085    else if (const CopyStmt *copy = SVFUtil::dyn_cast<CopyStmt>(stmt))
1086    {
1088    }
1089    else if (const GepStmt *gep = SVFUtil::dyn_cast<GepStmt>(stmt))
1090    {
1092    }
1093    else if (const SelectStmt *select = SVFUtil::dyn_cast<SelectStmt>(stmt))
1094    {
1096    }
1097    else if (const PhiStmt *phi = SVFUtil::dyn_cast<PhiStmt>(stmt))
1098    {
1100    }
1101    else if (const CallPE *callPE = SVFUtil::dyn_cast<CallPE>(stmt))
1102    {
1103        // To handle Call Edge
1105    }
1106    else if (const RetPE *retPE = SVFUtil::dyn_cast<RetPE>(stmt))
1107    {
1108        updateStateOnRet(retPE);
1109    }
1110    else
1111        assert(false && "implement this part");
1112    // NullPtr is index 0, it should not be changed
1113    assert(!getAbsStateFromTrace(stmt->getICFGNode())[IRGraph::NullPtr].isInterval() &&
1114           !getAbsStateFromTrace(stmt->getICFGNode())[IRGraph::NullPtr].isAddr());
1115}
1116
// Conservative treatment of a recursive call (TOP mode): the non-pointer
// return value and every non-pointer store target reachable in the callee's
// basic blocks are set to interval TOP. NOTE(review): the binding of `as`
// (1120), a null/filter check before the top-assignment (1126) and two lines
// before the reachable-BB loop (1142-1143) are elided in this extraction; code
// left byte-identical.
1119{
1121    const RetICFGNode *retNode = callNode->getRetICFGNode();
1122    if (retNode->getSVFStmts().size() > 0)
1123    {
1124        if (const RetPE *retPE = SVFUtil::dyn_cast<RetPE>(*retNode->getSVFStmts().begin()))
1125        {
1127            if (!retPE->getLHSVar()->isPointer() && !retPE->getLHSVar()->isConstDataOrAggDataButNotNullPtr())
1128                as[retPE->getLHSVarID()] = IntervalValue::top();
1129        }
1130    }
1131    if (!retNode->getOutEdges().empty())
1132    {
1133        if (retNode->getOutEdges().size() == 1)
1134        {

1136        }
1137        else
1138        {
1139            return;
1140        }
1141    }
1144    for (const SVFBasicBlock * bb: callNode->getCalledFunction()->getReachableBBs())
1145    {
1146        for (const ICFGNode* node: bb->getICFGNodeList())
1147        {
1148            for (const SVFStmt *stmt: node->getSVFStmts())
1149            {
1150                if (const StoreStmt *store = SVFUtil::dyn_cast<StoreStmt>(stmt))
1151                {
1152                    const SVFVar *rhsVar = store->getRHSVar();
1153                    u32_t lhs = store->getLHSVarID();
1154                    if (as.inVarToAddrsTable(lhs))
1155                    {
1156                        if (!rhsVar->isPointer() && !rhsVar->isConstDataOrAggDataButNotNullPtr())
1157                        {
1158                            const AbstractValue &addrs = as[lhs];
1159                            for (const auto &addr: addrs.getAddrs())
1160                            {
1161                                as.store(addr, IntervalValue::top());
1162                            }
1163                        }
1164                    }
1165                }
1166            }
1167        }
1168    }
1169}
1170
1171// count the size of memory map
1173{
1174 if (count == 0)
1175 {
1176 generalNumMap["ES_Var_AVG_Num"] = 0;
1177 generalNumMap["ES_Loc_AVG_Num"] = 0;
1178 generalNumMap["ES_Var_Addr_AVG_Num"] = 0;
1179 generalNumMap["ES_Loc_Addr_AVG_Num"] = 0;
1180 }
1181 ++count;
1182}
1183
// Finalizes the collected statistics: averages the per-state counters, records
// ICFG node/function totals, computes node and function coverage (stored as
// percentage * 100 for two decimal places), counts external vs. non-external
// callsites and records total analysis time. NOTE(review): declarations of
// totalICFGNodes/analyzedNodes and the funs/analyzedFuns/extCallSiteNum
// containers (lines 1186, 1196, 1200, 1215-1217, 1237) are elided in this
// extraction; code left byte-identical.
1185{
1187    if (count > 0)
1188    {
1189        generalNumMap["ES_Var_AVG_Num"] /= count;
1190        generalNumMap["ES_Loc_AVG_Num"] /= count;
1191        generalNumMap["ES_Var_Addr_AVG_Num"] /= count;
1192        generalNumMap["ES_Loc_Addr_AVG_Num"] /= count;
1193    }
1194    generalNumMap["SVF_STMT_NUM"] = count;

1197    generalNumMap["ICFG_Node_Num"] = totalICFGNodes;

1199    // Calculate coverage: use allAnalyzedNodes which tracks all nodes across all entry points
1201    generalNumMap["Analyzed_ICFG_Node_Num"] = analyzedNodes;

1203    // Coverage percentage (stored as integer percentage * 100 for precision)
1204    if (totalICFGNodes > 0)
1205    {
1206        double coveragePercent = (double)analyzedNodes / (double)totalICFGNodes * 100.0;
1207        generalNumMap["ICFG_Coverage_Percent"] = (u32_t)(coveragePercent * 100); // Store as percentage * 100
1208    }
1209    else
1210    {
1211        generalNumMap["ICFG_Coverage_Percent"] = 0;
1212    }

1214    u32_t callSiteNum = 0;
1218    for (const auto &it: *_ae->svfir->getICFG())
1219    {
1220        if (it.second->getFun())
1221        {
1222            funs.insert(it.second->getFun());
1223            // Check if this node was analyzed (across all entry points)
1224            if (_ae->allAnalyzedNodes.find(it.second) != _ae->allAnalyzedNodes.end())
1225            {
1226                analyzedFuns.insert(it.second->getFun());
1227            }
1228        }
1229        if (const CallICFGNode *callNode = dyn_cast<CallICFGNode>(it.second))
1230        {
1231            if (!isExtCall(callNode))
1232            {
1233                callSiteNum++;
1234            }
1235            else
1236            {
1238            }
1239        }
1240    }
1241    generalNumMap["Func_Num"] = funs.size();
1242    generalNumMap["Analyzed_Func_Num"] = analyzedFuns.size();

1244    // Function coverage percentage
1245    if (funs.size() > 0)
1246    {
1247        double funcCoveragePercent = (double)analyzedFuns.size() / (double)funs.size() * 100.0;
1248        generalNumMap["Func_Coverage_Percent"] = (u32_t)(funcCoveragePercent * 100); // Store as percentage * 100
1249    }
1250    else
1251    {
1252        generalNumMap["Func_Coverage_Percent"] = 0;
1253    }

1255    generalNumMap["EXT_CallSite_Num"] = extCallSiteNum;
1256    generalNumMap["NonEXT_CallSite_Num"] = callSiteNum;
1257    timeStatMap["Total_Time(sec)"] = (double)(endTime - startTime) / TIMEINTERVAL;

1259}
1260
1262{
1263 std::string fullName(_ae->moduleName);
1264 std::string name;
1265 std::string moduleName;
1266 if (fullName.find('/') == std::string::npos)
1267 {
1268 std::string name = fullName;
1269 moduleName = name.substr(0, fullName.find('.'));
1270 }
1271 else
1272 {
1273 std::string name = fullName.substr(fullName.find('/'), fullName.size());
1274 moduleName = name.substr(0, fullName.find('.'));
1275 }
1276
1277 SVFUtil::outs() << "\n************************\n";
1278 SVFUtil::outs() << "################ (program : " << moduleName << ")###############\n";
1279 SVFUtil::outs().flags(std::ios::left);
1280 unsigned field_width = 30;
1281 for (NUMStatMap::iterator it = generalNumMap.begin(), eit = generalNumMap.end(); it != eit; ++it)
1282 {
1283 // Special handling for percentage fields (stored as percentage * 100)
1284 if (it->first == "ICFG_Coverage_Percent" || it->first == "Func_Coverage_Percent")
1285 {
1286 double percent = (double)it->second / 100.0;
1287 std::cout << std::setw(field_width) << it->first << std::fixed << std::setprecision(2) << percent << "%\n";
1288 }
1289 else
1290 {
1291 std::cout << std::setw(field_width) << it->first << it->second << "\n";
1292 }
1293 }
1294 SVFUtil::outs() << "-------------------------------------------------------\n";
1295 for (TIMEStatMap::iterator it = timeStatMap.begin(), eit = timeStatMap.end(); it != eit; ++it)
1296 {
1297 // format out put with width 20 space
1298 SVFUtil::outs() << std::setw(field_width) << it->first << it->second << "\n";
1299 }
1300 SVFUtil::outs() << "Memory usage: " << memUsage << "\n";
1301
1302 SVFUtil::outs() << "#######################################################" << std::endl;
1303 SVFUtil::outs().flush();
1304}
1305
// Scans every ICFG call node and records calls to the known checkpoint stub
// functions (svf_assert, buffer-access and null-load probes) into
// `checkpoints`. NOTE(review): the guard conditions before the buf/nullptr
// branches and the second half of their find() comparisons (lines 1325, 1328,
// 1333, 1336) are elided in this extraction; code left byte-identical.
1307{
1308    // traverse every ICFGNode
1309    Set<std::string> ae_checkpoint_names = {"svf_assert"};
1310    Set<std::string> buf_checkpoint_names = {"UNSAFE_BUFACCESS", "SAFE_BUFACCESS"};
1311    Set<std::string> nullptr_checkpoint_names = {"UNSAFE_LOAD", "SAFE_LOAD"};

1313    for (auto it = svfir->getICFG()->begin(); it != svfir->getICFG()->end(); ++it)
1314    {
1315        const ICFGNode* node = it->second;
1316        if (const CallICFGNode *call = SVFUtil::dyn_cast<CallICFGNode>(node))
1317        {
1318            if (const FunObjVar *fun = call->getCalledFunction())
1319            {
1320                if (ae_checkpoint_names.find(fun->getName()) !=
1321                    ae_checkpoint_names.end())
1322                {
1323                    checkpoints.insert(call);
1324                }
1326                {
1327                    if (buf_checkpoint_names.find(fun->getName()) !=
1329                    {
1330                        checkpoints.insert(call);
1331                    }
1332                }
1334                {
1335                    if (nullptr_checkpoint_names.find(fun->getName()) !=
1337                    {
1338                        checkpoints.insert(call);
1339                    }
1340                }
1341            }
1342        }
1343    }
1344}
1345
1347{
1348 if (checkpoints.size() == 0)
1349 {
1350 return;
1351 }
1352 else
1353 {
1354 SVFUtil::errs() << SVFUtil::errMsg("At least one svf_assert has not been checked!!") << "\n";
1355 for (const CallICFGNode* call: checkpoints)
1356 SVFUtil::errs() << call->toString() + "\n";
1357 assert(false);
1358 }
1359
1360}
1361
// Transfer function for a GEP: collects the addresses of the base object at
// every field index in the (MaxFieldLimit-clamped) element-index interval and
// assigns their join to the LHS. NOTE(review): the declaration of `gepAddrs`
// (original source line 1368, presumably an AddressValue) is elided in this
// extraction; code left byte-identical.
1363{
1364    AbstractState& as = getAbsStateFromTrace(gep->getICFGNode());
1365    u32_t rhs = gep->getRHSVarID();
1366    u32_t lhs = gep->getLHSVarID();
1367    IntervalValue offsetPair = as.getElementIndex(gep);
1369    APOffset lb = offsetPair.lb().getIntNumeral() < Options::MaxFieldLimit()?
1370                  offsetPair.lb().getIntNumeral(): Options::MaxFieldLimit();
1371    APOffset ub = offsetPair.ub().getIntNumeral() < Options::MaxFieldLimit()?
1372                  offsetPair.ub().getIntNumeral(): Options::MaxFieldLimit();
1373    for (APOffset i = lb; i <= ub; i++)
1374        gepAddrs.join_with(as.getGepObjAddrs(rhs,IntervalValue(i)));
1375    as[lhs] = gepAddrs;
1376}
1377
1379{
1380 AbstractState& as = getAbsStateFromTrace(select->getICFGNode());
1381 u32_t res = select->getResID();
1382 u32_t tval = select->getTrueValue()->getId();
1383 u32_t fval = select->getFalseValue()->getId();
1384 u32_t cond = select->getCondition()->getId();
1385 if (as[cond].getInterval().is_numeral())
1386 {
1387 as[res] = as[cond].getInterval().is_zero() ? as[fval] : as[tval];
1388 }
1389 else
1390 {
1391 as[res] = as[tval];
1392 as[res].join_with(as[fval]);
1393 }
1394}
1395
// Transfer function for a phi: joins the value of each operand whose defining
// node has a state, gated by the feasibility of a conditional intra-edge when
// one exists, and assigns the join to the result. NOTE(review): the bindings
// of `as`, `rhs`, `opAs` and `edge`, and the feasibility test on conditional
// edges (lines 1399, 1401, 1406, 1408-1410, 1419) are elided in this
// extraction; code left byte-identical.
1397{
1398    const ICFGNode* icfgNode = phi->getICFGNode();
1400    u32_t res = phi->getResID();
1402    for (u32_t i = 0; i < phi->getOpVarNum(); i++)
1403    {
1404        NodeID curId = phi->getOpVarID(i);
1405        const ICFGNode* opICFGNode = phi->getOpICFGNode(i);
1407        {
1411            // if IntraEdge, check the condition, if it is feasible, join the value
1412            // if IntraEdge but not conditional edge, join the value
1413            // if not IntraEdge, join the value
1414            if (edge)
1415            {
1416                const IntraCFGEdge* intraEdge = SVFUtil::cast<IntraCFGEdge>(edge);
1417                if (intraEdge->getCondition())
1418                {
1420                        rhs.join_with(opAs[curId]);
1421                }
1422                else
1423                    rhs.join_with(opAs[curId]);
1424            }
1425            else
1426            {
1427                rhs.join_with(opAs[curId]);
1428            }
1429        }
1430    }
1431    as[res] = rhs;
1432}
1433
1434
1436{
1437 AbstractState& as = getAbsStateFromTrace(callPE->getICFGNode());
1438 NodeID lhs = callPE->getLHSVarID();
1439 NodeID rhs = callPE->getRHSVarID();
1440 as[lhs] = as[rhs];
1441}
1442
1444{
1446 NodeID lhs = retPE->getLHSVarID();
1447 NodeID rhs = retPE->getRHSVarID();
1448 as[lhs] = as[rhs];
1449}
1450
1451
1453{
1454 AbstractState& as = getAbsStateFromTrace(addr->getICFGNode());
1455 as.initObjVar(SVFUtil::cast<ObjVar>(addr->getRHSVar()));
1456 if (addr->getRHSVar()->getType()->getKind() == SVFType::SVFIntegerTy)
1457 as[addr->getRHSVarID()].getInterval().meet_with(utils->getRangeLimitFromType(addr->getRHSVar()->getType()));
1458 as[addr->getLHSVarID()] = as[addr->getRHSVarID()];
1459}
1460
1461
1463{
1467 const ICFGNode* node = binary->getICFGNode();
1469 u32_t op0 = binary->getOpVarID(0);
1470 u32_t op1 = binary->getOpVarID(1);
1471 u32_t res = binary->getResID();
1472 if (!as.inVarToValTable(op0)) as[op0] = IntervalValue::top();
1473 if (!as.inVarToValTable(op1)) as[op1] = IntervalValue::top();
1474 IntervalValue &lhs = as[op0].getInterval(), &rhs = as[op1].getInterval();
1476 switch (binary->getOpcode())
1477 {
1478 case BinaryOPStmt::Add:
1479 case BinaryOPStmt::FAdd:
1480 resVal = (lhs + rhs);
1481 break;
1482 case BinaryOPStmt::Sub:
1483 case BinaryOPStmt::FSub:
1484 resVal = (lhs - rhs);
1485 break;
1486 case BinaryOPStmt::Mul:
1487 case BinaryOPStmt::FMul:
1488 resVal = (lhs * rhs);
1489 break;
1490 case BinaryOPStmt::SDiv:
1491 case BinaryOPStmt::FDiv:
1492 case BinaryOPStmt::UDiv:
1493 resVal = (lhs / rhs);
1494 break;
1495 case BinaryOPStmt::SRem:
1496 case BinaryOPStmt::FRem:
1497 case BinaryOPStmt::URem:
1498 resVal = (lhs % rhs);
1499 break;
1500 case BinaryOPStmt::Xor:
1501 resVal = (lhs ^ rhs);
1502 break;
1503 case BinaryOPStmt::And:
1504 resVal = (lhs & rhs);
1505 break;
1506 case BinaryOPStmt::Or:
1507 resVal = (lhs | rhs);
1508 break;
1509 case BinaryOPStmt::AShr:
1510 resVal = (lhs >> rhs);
1511 break;
1512 case BinaryOPStmt::Shl:
1513 resVal = (lhs << rhs);
1514 break;
1515 case BinaryOPStmt::LShr:
1516 resVal = (lhs >> rhs);
1517 break;
1518 default:
1519 assert(false && "undefined binary: ");
1520 }
1521 as[res] = resVal;
1522}
1523
1525{
1526 AbstractState& as = getAbsStateFromTrace(cmp->getICFGNode());
1527 u32_t op0 = cmp->getOpVarID(0);
1528 u32_t op1 = cmp->getOpVarID(1);
1529 // if it is address
1530 if (as.inVarToAddrsTable(op0) && as.inVarToAddrsTable(op1))
1531 {
1533 AddressValue addrOp0 = as[op0].getAddrs();
1534 AddressValue addrOp1 = as[op1].getAddrs();
1535 u32_t res = cmp->getResID();
1536 if (addrOp0.equals(addrOp1))
1537 {
1538 resVal = IntervalValue(1, 1);
1539 }
1540 else if (addrOp0.hasIntersect(addrOp1))
1541 {
1542 resVal = IntervalValue(0, 1);
1543 }
1544 else
1545 {
1546 resVal = IntervalValue(0, 0);
1547 }
1548 as[res] = resVal;
1549 }
1550 // if op0 or op1 is nullptr, compare abstractValue instead of touching addr or interval
1551 else if (op0 == IRGraph::NullPtr || op1 == IRGraph::NullPtr)
1552 {
1553 u32_t res = cmp->getResID();
1554 IntervalValue resVal = (as[op0].equals(as[op1])) ? IntervalValue(1, 1) : IntervalValue(0, 0);
1555 as[res] = resVal;
1556 }
1557 else
1558 {
1559 if (!as.inVarToValTable(op0))
1561 if (!as.inVarToValTable(op1))
1563 u32_t res = cmp->getResID();
1564 if (as.inVarToValTable(op0) && as.inVarToValTable(op1))
1565 {
1567 if (as[op0].isInterval() && as[op1].isInterval())
1568 {
1569 IntervalValue &lhs = as[op0].getInterval(),
1570 &rhs = as[op1].getInterval();
1571 // AbstractValue
1572 auto predicate = cmp->getPredicate();
1573 switch (predicate)
1574 {
1575 case CmpStmt::ICMP_EQ:
1576 case CmpStmt::FCMP_OEQ:
1577 case CmpStmt::FCMP_UEQ:
1578 resVal = (lhs == rhs);
1579 // resVal = (lhs.getInterval() == rhs.getInterval());
1580 break;
1581 case CmpStmt::ICMP_NE:
1582 case CmpStmt::FCMP_ONE:
1583 case CmpStmt::FCMP_UNE:
1584 resVal = (lhs != rhs);
1585 break;
1586 case CmpStmt::ICMP_UGT:
1587 case CmpStmt::ICMP_SGT:
1588 case CmpStmt::FCMP_OGT:
1589 case CmpStmt::FCMP_UGT:
1590 resVal = (lhs > rhs);
1591 break;
1592 case CmpStmt::ICMP_UGE:
1593 case CmpStmt::ICMP_SGE:
1594 case CmpStmt::FCMP_OGE:
1595 case CmpStmt::FCMP_UGE:
1596 resVal = (lhs >= rhs);
1597 break;
1598 case CmpStmt::ICMP_ULT:
1599 case CmpStmt::ICMP_SLT:
1600 case CmpStmt::FCMP_OLT:
1601 case CmpStmt::FCMP_ULT:
1602 resVal = (lhs < rhs);
1603 break;
1604 case CmpStmt::ICMP_ULE:
1605 case CmpStmt::ICMP_SLE:
1606 case CmpStmt::FCMP_OLE:
1607 case CmpStmt::FCMP_ULE:
1608 resVal = (lhs <= rhs);
1609 break;
1611 resVal = IntervalValue(0, 0);
1612 break;
1613 case CmpStmt::FCMP_TRUE:
1614 resVal = IntervalValue(1, 1);
1615 break;
1616 case CmpStmt::FCMP_ORD:
1617 case CmpStmt::FCMP_UNO:
1618 // FCMP_ORD: true if both operands are not NaN
1619 // FCMP_UNO: true if either operand is NaN
1620 // Conservatively return [0, 1] since we don't track NaN
1621 resVal = IntervalValue(0, 1);
1622 break;
1623 default:
1624 assert(false && "undefined compare: ");
1625 }
1626 as[res] = resVal;
1627 }
1628 else if (as[op0].isAddr() && as[op1].isAddr())
1629 {
1630 AddressValue &lhs = as[op0].getAddrs(),
1631 &rhs = as[op1].getAddrs();
1632 auto predicate = cmp->getPredicate();
1633 switch (predicate)
1634 {
1635 case CmpStmt::ICMP_EQ:
1636 case CmpStmt::FCMP_OEQ:
1637 case CmpStmt::FCMP_UEQ:
1638 {
1639 if (lhs.hasIntersect(rhs))
1640 {
1641 resVal = IntervalValue(0, 1);
1642 }
1643 else if (lhs.empty() && rhs.empty())
1644 {
1645 resVal = IntervalValue(1, 1);
1646 }
1647 else
1648 {
1649 resVal = IntervalValue(0, 0);
1650 }
1651 break;
1652 }
1653 case CmpStmt::ICMP_NE:
1654 case CmpStmt::FCMP_ONE:
1655 case CmpStmt::FCMP_UNE:
1656 {
1657 if (lhs.hasIntersect(rhs))
1658 {
1659 resVal = IntervalValue(0, 1);
1660 }
1661 else if (lhs.empty() && rhs.empty())
1662 {
1663 resVal = IntervalValue(0, 0);
1664 }
1665 else
1666 {
1667 resVal = IntervalValue(1, 1);
1668 }
1669 break;
1670 }
1671 case CmpStmt::ICMP_UGT:
1672 case CmpStmt::ICMP_SGT:
1673 case CmpStmt::FCMP_OGT:
1674 case CmpStmt::FCMP_UGT:
1675 {
1676 if (lhs.size() == 1 && rhs.size() == 1)
1677 {
1678 resVal = IntervalValue(*lhs.begin() > *rhs.begin());
1679 }
1680 else
1681 {
1682 resVal = IntervalValue(0, 1);
1683 }
1684 break;
1685 }
1686 case CmpStmt::ICMP_UGE:
1687 case CmpStmt::ICMP_SGE:
1688 case CmpStmt::FCMP_OGE:
1689 case CmpStmt::FCMP_UGE:
1690 {
1691 if (lhs.size() == 1 && rhs.size() == 1)
1692 {
1693 resVal = IntervalValue(*lhs.begin() >= *rhs.begin());
1694 }
1695 else
1696 {
1697 resVal = IntervalValue(0, 1);
1698 }
1699 break;
1700 }
1701 case CmpStmt::ICMP_ULT:
1702 case CmpStmt::ICMP_SLT:
1703 case CmpStmt::FCMP_OLT:
1704 case CmpStmt::FCMP_ULT:
1705 {
1706 if (lhs.size() == 1 && rhs.size() == 1)
1707 {
1708 resVal = IntervalValue(*lhs.begin() < *rhs.begin());
1709 }
1710 else
1711 {
1712 resVal = IntervalValue(0, 1);
1713 }
1714 break;
1715 }
1716 case CmpStmt::ICMP_ULE:
1717 case CmpStmt::ICMP_SLE:
1718 case CmpStmt::FCMP_OLE:
1719 case CmpStmt::FCMP_ULE:
1720 {
1721 if (lhs.size() == 1 && rhs.size() == 1)
1722 {
1723 resVal = IntervalValue(*lhs.begin() <= *rhs.begin());
1724 }
1725 else
1726 {
1727 resVal = IntervalValue(0, 1);
1728 }
1729 break;
1730 }
1732 resVal = IntervalValue(0, 0);
1733 break;
1734 case CmpStmt::FCMP_TRUE:
1735 resVal = IntervalValue(1, 1);
1736 break;
1737 case CmpStmt::FCMP_ORD:
1738 case CmpStmt::FCMP_UNO:
1739 // FCMP_ORD: true if both operands are not NaN
1740 // FCMP_UNO: true if either operand is NaN
1741 // Conservatively return [0, 1] since we don't track NaN
1742 resVal = IntervalValue(0, 1);
1743 break;
1744 default:
1745 assert(false && "undefined compare: ");
1746 }
1747 as[res] = resVal;
1748 }
1749 }
1750 }
1751}
1752
1754{
1756 u32_t rhs = load->getRHSVarID();
1757 u32_t lhs = load->getLHSVarID();
1758 as[lhs] = as.loadValue(rhs);
1759}
1760
1762{
1764 u32_t rhs = store->getRHSVarID();
1765 u32_t lhs = store->getLHSVarID();
1766 as.storeValue(lhs, as[rhs]);
1767}
1768
1770{
1771 auto getZExtValue = [](AbstractState& as, const SVFVar* var)
1772 {
1773 const SVFType* type = var->getType();
1774 if (SVFUtil::isa<SVFIntegerType>(type))
1775 {
1776 u32_t bits = type->getByteSize() * 8;
1777 if (as[var->getId()].getInterval().is_numeral())
1778 {
1779 if (bits == 8)
1780 {
1781 int8_t signed_i8_value = as[var->getId()].getInterval().getIntNumeral();
1784 }
1785 else if (bits == 16)
1786 {
1787 s16_t signed_i16_value = as[var->getId()].getInterval().getIntNumeral();
1790 }
1791 else if (bits == 32)
1792 {
1793 s32_t signed_i32_value = as[var->getId()].getInterval().getIntNumeral();
1796 }
1797 else if (bits == 64)
1798 {
1799 s64_t signed_i64_value = as[var->getId()].getInterval().getIntNumeral();
1801 // we only support i64 at most
1802 }
1803 else
1804 assert(false && "cannot support int type other than u8/16/32/64");
1805 }
1806 else
1807 {
1808 return IntervalValue::top(); // TODO: may have better solution
1809 }
1810 }
1811 return IntervalValue::top(); // TODO: may have better solution
1812 };
1813
1814 auto getTruncValue = [&](const AbstractState& as, const SVFVar* var,
1815 const SVFType* dstType)
1816 {
1817 const IntervalValue& itv = as[var->getId()].getInterval();
1818 if(itv.isBottom()) return itv;
1819 // get the value of ub and lb
1821 s64_t int_ub = itv.ub().getIntNumeral();
1822 // get dst type
1823 u32_t dst_bits = dstType->getByteSize() * 8;
1824 if (dst_bits == 8)
1825 {
1826 // get the signed value of ub and lb
1827 int8_t s8_lb = static_cast<int8_t>(int_lb);
1828 int8_t s8_ub = static_cast<int8_t>(int_ub);
1829 if (s8_lb > s8_ub)
1830 {
1831 // return range of s8
1833 }
1834 return IntervalValue(s8_lb, s8_ub);
1835 }
1836 else if (dst_bits == 16)
1837 {
1838 // get the signed value of ub and lb
1839 s16_t s16_lb = static_cast<s16_t>(int_lb);
1840 s16_t s16_ub = static_cast<s16_t>(int_ub);
1841 if (s16_lb > s16_ub)
1842 {
1843 // return range of s16
1845 }
1846 return IntervalValue(s16_lb, s16_ub);
1847 }
1848 else if (dst_bits == 32)
1849 {
1850 // get the signed value of ub and lb
1851 s32_t s32_lb = static_cast<s32_t>(int_lb);
1852 s32_t s32_ub = static_cast<s32_t>(int_ub);
1853 if (s32_lb > s32_ub)
1854 {
1855 // return range of s32
1857 }
1858 return IntervalValue(s32_lb, s32_ub);
1859 }
1860 else
1861 {
1862 assert(false && "cannot support dst int type other than u8/16/32");
1863 abort();
1864 }
1865 };
1866
1867 AbstractState& as = getAbsStateFromTrace(copy->getICFGNode());
1868 u32_t lhs = copy->getLHSVarID();
1869 u32_t rhs = copy->getRHSVarID();
1870
1871 if (copy->getCopyKind() == CopyStmt::COPYVAL)
1872 {
1873 as[lhs] = as[rhs];
1874 }
1875 else if (copy->getCopyKind() == CopyStmt::ZEXT)
1876 {
1877 as[lhs] = getZExtValue(as, copy->getRHSVar());
1878 }
1879 else if (copy->getCopyKind() == CopyStmt::SEXT)
1880 {
1881 as[lhs] = as[rhs].getInterval();
1882 }
1883 else if (copy->getCopyKind() == CopyStmt::FPTOSI)
1884 {
1885 as[lhs] = as[rhs].getInterval();
1886 }
1887 else if (copy->getCopyKind() == CopyStmt::FPTOUI)
1888 {
1889 as[lhs] = as[rhs].getInterval();
1890 }
1891 else if (copy->getCopyKind() == CopyStmt::SITOFP)
1892 {
1893 as[lhs] = as[rhs].getInterval();
1894 }
1895 else if (copy->getCopyKind() == CopyStmt::UITOFP)
1896 {
1897 as[lhs] = as[rhs].getInterval();
1898 }
1899 else if (copy->getCopyKind() == CopyStmt::TRUNC)
1900 {
1901 as[lhs] = getTruncValue(as, copy->getRHSVar(), copy->getLHSVar()->getType());
1902 }
1903 else if (copy->getCopyKind() == CopyStmt::FPTRUNC)
1904 {
1905 as[lhs] = as[rhs].getInterval();
1906 }
1907 else if (copy->getCopyKind() == CopyStmt::INTTOPTR)
1908 {
1909 //insert nullptr
1910 }
1911 else if (copy->getCopyKind() == CopyStmt::PTRTOINT)
1912 {
1914 }
1915 else if (copy->getCopyKind() == CopyStmt::BITCAST)
1916 {
1917 if (as[rhs].isAddr())
1918 {
1919 as[lhs] = as[rhs];
1920 }
1921 else
1922 {
1923 // do nothing
1924 }
1925 }
1926 else
1927 assert(false && "undefined copy kind");
1928}
1929
1930
1931
#define BlackHoleObjAddr
#define TIMEINTERVAL
Definition SVFType.h:621
newitem type
Definition cJSON.cpp:2739
copy
Definition cJSON.cpp:414
const char *const name
Definition cJSON.h:264
void performStat() override
std::string getMemUsage()
AbstractInterpretation * _ae
Handles external API calls and manages abstract states.
Definition AbsExtAPI.h:44
void handleExtAPI(const CallICFGNode *call)
Handles an external API call.
IntervalValue getRangeLimitFromType(const SVFType *type)
Gets the range limit from a type.
void updateStateOnCall(const CallPE *callPE)
const FunObjVar * getCallee(const CallICFGNode *callNode)
Get callee function: directly for direct calls, via pointer analysis for indirect calls.
std::vector< const ICFGNode * > getNextNodesOfCycle(const ICFGCycleWTO *cycle) const
void updateStateOnStore(const StoreStmt *store)
bool hasAbsStateFromTrace(const ICFGNode *node)
virtual void handleFunCall(const CallICFGNode *callNode)
void analyzeFromAllProgEntries()
Analyze all entry points (functions without callers)
void updateStateOnGep(const GepStmt *gep)
virtual void handleGlobalNode()
Global ICFGNode is handled at the entry of the program,.
bool mergeStatesFromPredecessors(const ICFGNode *icfgNode)
void handleFunction(const ICFGNode *funEntry, const CallICFGNode *caller=nullptr)
virtual bool isExtCall(const CallICFGNode *callNode)
void updateStateOnPhi(const PhiStmt *phi)
bool handleICFGNode(const ICFGNode *node)
bool isBranchFeasible(const IntraCFGEdge *intraEdge, AbstractState &as)
std::vector< std::unique_ptr< AEDetector > > detectors
std::vector< const ICFGNode * > getNextNodes(const ICFGNode *node) const
virtual void handleExtCall(const CallICFGNode *callNode)
AbstractState & getAbsStateFromTrace(const ICFGNode *node)
Retrieves the abstract state from the trace for a given ICFG node.
Set< const CallICFGNode * > checkpoints
bool isSwitchBranchFeasible(const SVFVar *var, s64_t succ, AbstractState &as)
void updateStateOnSelect(const SelectStmt *select)
virtual void handleSVFStatement(const SVFStmt *stmt)
bool skipRecursiveCall(const CallICFGNode *callNode)
Skip recursive callsites (within SCC); entry calls from outside SCC are not skipped.
SVFIR * svfir
protected data members, also used in subclasses
virtual void runOnModule(ICFG *icfg)
virtual bool isRecursiveCallSite(const CallICFGNode *callNode, const FunObjVar *)
Check if caller and callee are in the same CallGraph SCC (i.e. a recursive callsite)
bool shouldApplyNarrowing(const FunObjVar *fun)
Check if narrowing should be applied: always for regular loops, mode-dependent for recursion.
virtual bool isRecursiveFun(const FunObjVar *fun)
Check if a function is recursive (part of a call graph SCC)
void updateStateOnAddr(const AddrStmt *addr)
virtual ~AbstractInterpretation()
Destructor.
bool isCmpBranchFeasible(const CmpStmt *cmpStmt, s64_t succ, AbstractState &as)
virtual void handleCallSite(const ICFGNode *node)
void updateStateOnRet(const RetPE *retPE)
void updateStateOnCopy(const CopyStmt *copy)
std::deque< const FunObjVar * > collectProgEntryFuns()
Get all entry point functions (functions without callers)
void updateStateOnLoad(const LoadStmt *load)
void updateStateOnBinary(const BinaryOPStmt *binary)
Map< const ICFGNode *, AbstractState > abstractTrace
Set< const ICFGNode * > allAnalyzedNodes
virtual void handleSingletonWTO(const ICFGSingletonWTO *icfgSingletonWto)
handle instructions in svf basic blocks
virtual void setTopToObjInRecursion(const CallICFGNode *callnode)
Set all store values in a recursive function to TOP (used in TOP mode)
virtual void handleRecursiveCall(const CallICFGNode *callNode)
Handle recursive call in TOP mode: set all stores and return value to TOP.
virtual void handleLoopOrRecursion(const ICFGCycleWTO *cycle, const CallICFGNode *caller=nullptr)
Map< s32_t, s32_t > _switch_lhsrhs_predicate
void updateStateOnCmp(const CmpStmt *cmp)
AbstractState narrowing(const AbstractState &other)
domain narrow with other, and return the narrowed domain
AbstractState widening(const AbstractState &other)
domain widen with other, and return the widened domain
AddressValue & getAddrs()
bool meet_with(const AddressValue &other)
Return an intersected AddressValue.
AddrSet::const_iterator begin() const
NodeID getRHSVarID() const
NodeID getLHSVarID() const
s64_t getIntNumeral() const
const FunObjVar * getFunction() const
Get function of this call node.
Definition CallGraph.h:191
bool hasIndCSCallees(const CallICFGNode *cs) const
Definition CallGraph.h:335
const FunctionSet & getIndCSCallees(const CallICFGNode *cs) const
Definition CallGraph.h:339
@ ICMP_SGT
signed greater than
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ ICMP_UGE
unsigned greater or equal
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ ICMP_ULE
unsigned less or equal
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ ICMP_NE
not equal
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
@ ICMP_ULT
unsigned less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ ICMP_SLT
signed less than
@ ICMP_UGT
unsigned greater than
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_SLE
signed less or equal
bool empty() const
Definition WorkList.h:161
bool isDeclaration() const
iterator begin()
Iterators.
u32_t nodeNum
total num of edge
const GEdgeSetTy & getOutEdges() const
const GEdgeSetTy & getInEdges() const
virtual const FunObjVar * getFun() const
Return the function of this ICFGNode.
Definition ICFGNode.h:74
virtual const std::string toString() const
Definition ICFG.cpp:49
const SVFStmtList & getSVFStmts() const
Definition ICFGNode.h:115
ICFGEdge * getICFGEdge(const ICFGNode *src, const ICFGNode *dst, ICFGEdge::ICFGEdgeK kind)
Get a SVFG edge according to src and dst.
Definition ICFG.cpp:311
void updateCallGraph(CallGraph *callgraph)
update ICFG for indirect calls
Definition ICFG.cpp:427
FunEntryICFGNode * getFunEntryICFGNode(const FunObjVar *fun)
Add a function entry node.
Definition ICFG.cpp:242
GlobalICFGNode * getGlobalICFGNode() const
Definition ICFG.h:244
NodeID getBlkPtr() const
Definition IRGraph.h:255
void meet_with(const IntervalValue &other)
Return an intersected IntervalValue.
static BoundedInt minus_infinity()
Get minus infinity -inf.
static BoundedInt plus_infinity()
Get plus infinity +inf.
static IntervalValue top()
Create the IntervalValue [-inf, +inf].
const BoundedInt & lb() const
Return the lower bound.
static const OptionMap< u32_t > HandleRecur
recursion handling mode, Default: TOP
Definition Options.h:245
static const Option< u32_t > MaxFieldLimit
Maximum number of field derivations for an object.
Definition Options.h:35
static const Option< u32_t > WidenDelay
Definition Options.h:243
static const Option< bool > NullDerefCheck
nullptr dereference checker, Default: false
Definition Options.h:253
static const Option< bool > PStat
Definition Options.h:116
static const Option< bool > BufferOverflowCheck
buffer overflow checker, Default: false
Definition Options.h:251
bool inSameCallGraphSCC(const FunObjVar *fun1, const FunObjVar *fun2)
Return TRUE if this edge is inside a PTACallGraph SCC, i.e., src node and dst node are in the same SC...
bool isInRecursion(const FunObjVar *fun) const
const Map< const FunObjVar *, const ICFGWTO * > & getFuncToWTO() const
Accessors for WTO data.
Definition PreAnalysis.h:73
AndersenWaveDiff * getPointerAnalysis() const
Accessors for Andersen's results.
Definition PreAnalysis.h:56
void initWTO()
Build WTO for each function using call graph SCC.
CallGraph * getCallGraph() const
Definition PreAnalysis.h:60
ICFG * getICFG() const
Definition SVFIR.h:217
const CallSiteToFunPtrMap & getIndirectCallsites() const
Add/get indirect callsites.
Definition SVFIR.h:433
const SVFVar * getSVFVar(NodeID id) const
ObjVar/GepObjVar/BaseObjVar.
Definition SVFIR.h:131
static SVFIR * getPAG(bool buildFromFile=false)
Singleton design here to make sure we only have one instance during any analysis.
Definition SVFIR.h:116
NUMStatMap generalNumMap
Definition SVFStat.h:76
virtual void endClk()
Definition SVFStat.h:63
std::string moduleName
Definition SVFStat.h:99
TIMEStatMap timeStatMap
Definition SVFStat.h:78
virtual void startClk()
Definition SVFStat.h:58
double endTime
Definition SVFStat.h:81
double startTime
Definition SVFStat.h:80
ICFGNode * getICFGNode() const
u32_t getByteSize() const
Definition SVFType.h:289
virtual const std::string & getName() const
Definition SVFValue.h:186
std::string errMsg(const std::string &msg)
Print error message by converting a string into red string output.
Definition SVFUtil.cpp:78
std::ostream & errs()
Overwrite llvm::errs()
Definition SVFUtil.h:58
bool isExtCall(const FunObjVar *fun)
Definition SVFUtil.cpp:437
std::ostream & outs()
Overwrite llvm::outs()
Definition SVFUtil.h:52
for isBitcode
Definition BasicTypes.h:68
u32_t NodeID
Definition GeneralType.h:56
s64_t APOffset
Definition GeneralType.h:60
signed short s16_t
Definition GeneralType.h:54
unsigned short u16_t
Definition GeneralType.h:53
llvm::IRBuilder IRBuilder
Definition BasicTypes.h:74
signed s32_t
Definition GeneralType.h:48
unsigned u32_t
Definition GeneralType.h:47
signed long long s64_t
Definition GeneralType.h:50
WTOComponent< ICFG > ICFGWTOComp
Definition ICFGWTO.h:44