
src/hotspot/share/opto/callnode.hpp


  59 //------------------------------StartNode--------------------------------------
  60 // The method start node
  61 class StartNode : public MultiNode {
  62   virtual bool cmp( const Node &n ) const;
  63   virtual uint size_of() const; // Size is bigger
  64 public:
  65   const TypeTuple *_domain;
  66   StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
  67     init_class_id(Class_Start);
  68     init_req(0,this);
  69     init_req(1,root);
  70   }
  71   virtual int Opcode() const;
  72   virtual bool pinned() const { return true; };
  73   virtual const Type *bottom_type() const;
  74   virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  75   virtual const Type* Value(PhaseGVN* phase) const;
  76   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  77   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  78   virtual const RegMask &in_RegMask(uint) const;
  79   virtual Node *match( const ProjNode *proj, const Matcher *m );
  80   virtual uint ideal_reg() const { return 0; }
  81 #ifndef PRODUCT
  82   virtual void  dump_spec(outputStream *st) const;
  83   virtual void  dump_compact_spec(outputStream *st) const;
  84 #endif
  85 };
  86 
  87 //------------------------------StartOSRNode-----------------------------------
  88 // The method start node for on-stack replacement code
  89 class StartOSRNode : public StartNode {
  90 public:
  91   StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  92   virtual int   Opcode() const;
  93   static  const TypeTuple *osr_domain();
  94 };
  95 
  96 
  97 //------------------------------ParmNode---------------------------------------
  98 // Incoming parameters
  99 class ParmNode : public ProjNode {
 100   static const char * const names[TypeFunc::Parms+1];
 101 public:
 102   ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
 103     init_class_id(Class_Parm);
 104   }
 105   virtual int Opcode() const;
 106   virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
 107   virtual uint ideal_reg() const;
 108 #ifndef PRODUCT
 109   virtual void dump_spec(outputStream *st) const;
 110   virtual void dump_compact_spec(outputStream *st) const;
 111 #endif
 112 };
 113 

 634     assert(jvms != nullptr, "JVMS reference is null.");
 635     return jvms->scloff() + _merge_pointer_idx + 1;
 636   }
 637 
 638   // Assumes that "this" is an argument to a safepoint node "s", and that
 639   // "new_call" is being created to correspond to "s".  But the difference
 640   // between the start index of the jvmstates of "new_call" and "s" is
 641   // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
 642   // corresponds appropriately to "this" in "new_call".  Assumes that
 643   // "sosn_map" is a map, specific to the translation of "s" to "new_call",
 644   // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
 645   SafePointScalarMergeNode* clone(Dict* sosn_map, bool& new_node) const;
 646 
 647 #ifndef PRODUCT
 648   virtual void              dump_spec(outputStream *st) const;
 649 #endif
 650 };
 651 
 652 // Simple container for the outgoing projections of a call.  Useful
 653 // for serious surgery on calls.
 654 class CallProjections : public StackObj {
 655 public:
 656   Node* fallthrough_proj;
 657   Node* fallthrough_catchproj;
 658   Node* fallthrough_memproj;
 659   Node* fallthrough_ioproj;
 660   Node* catchall_catchproj;
 661   Node* catchall_memproj;
 662   Node* catchall_ioproj;
 663   Node* resproj;
 664   Node* exobj;
 665 };
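
For orientation (editorial addition, not part of the webrev): with this fixed-shape container, callers of the pre-change API stack-allocate the struct and let the call fill it in. A minimal sketch of that usage, assuming the enclosing C2 context and an illustrative CallNode* named call:

    // Sketch only; 'call' and the rewiring step are illustrative.
    CallProjections projs;
    call->extract_projections(&projs, /*separate_io_proj=*/ true);
    if (projs.fallthrough_catchproj != nullptr) {
      // normal control path of the call continues here
    }
    Node* result = projs.resproj; // the single result projection, may be null
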
 666 
 667 class CallGenerator;
 668 
 669 //------------------------------CallNode---------------------------------------
 670 // Call nodes now subsume the function of debug nodes at callsites, so they
 671 // contain the functionality of a full scope chain of debug nodes.
 672 class CallNode : public SafePointNode {
 673 
 674 protected:
 675   bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase);
 676 
 677 public:
 678   const TypeFunc* _tf;          // Function type
 679   address         _entry_point; // Address of method being called
 680   float           _cnt;         // Estimate of number of times called
 681   CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls
 682   const char*     _name;        // Printable name, if _method is null
 683 
 684   CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
 685     : SafePointNode(tf->domain()->cnt(), jvms, adr_type),
 686       _tf(tf),
 687       _entry_point(addr),
 688       _cnt(COUNT_UNKNOWN),
 689       _generator(nullptr),
 690       _name(nullptr)
 691   {
 692     init_class_id(Class_Call);
 693   }
 694 
 695   const TypeFunc* tf()         const { return _tf; }
 696   address  entry_point()       const { return _entry_point; }
 697   float    cnt()               const { return _cnt; }
 698   CallGenerator* generator()   const { return _generator; }
 699 
 700   void set_tf(const TypeFunc* tf)       { _tf = tf; }
 701   void set_entry_point(address p)       { _entry_point = p; }
 702   void set_cnt(float c)                 { _cnt = c; }
 703   void set_generator(CallGenerator* cg) { _generator = cg; }
 704 
 705   virtual const Type* bottom_type() const;
 706   virtual const Type* Value(PhaseGVN* phase) const;
 707   virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
 708   virtual Node* Identity(PhaseGVN* phase) { return this; }
 709   virtual bool        cmp(const Node &n) const;
 710   virtual uint        size_of() const = 0;
 711   virtual void        calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
 712   virtual Node*       match(const ProjNode* proj, const Matcher* m);
 713   virtual uint        ideal_reg() const { return NotAMachineReg; }
 714   // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
 715   // for some macro nodes whose expansion does not have a safepoint on the fast path.
 716   virtual bool        guaranteed_safepoint()  { return true; }
 717   // For macro nodes, the JVMState gets modified during expansion. If calls
 718   // use MachConstantBase, it gets modified during matching. So when cloning
 719   // the node the JVMState must be deep cloned. Default is to shallow clone.
 720   virtual bool needs_deep_clone_jvms(Compile* C) { return C->needs_deep_clone_jvms(); }
 721 
 722   // Returns true if the call may modify n
 723   virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseValues* phase);
 724   // Does this node have a use of n other than in debug information?
 725   bool                has_non_debug_use(Node* n);
 726   // Returns the unique CheckCastPP of a call,
 727   // or the result projection if there are several CheckCastPPs,
 728   // or null if there is none.
 729   Node* result_cast();
 730   // Does this node return a pointer?
 731   bool returns_pointer() const {
 732     const TypeTuple* r = tf()->range();
 733     return (r->cnt() > TypeFunc::Parms &&
 734             r->field_at(TypeFunc::Parms)->isa_ptr());
 735   }
 736 
 737   // Collect all the interesting edges from a call for use in
 738   // replacing the call by something else.  Used by macro expansion
 739   // and the late inlining support.
 740   void extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts = true);
 741 
 742   virtual uint match_edge(uint idx) const;
 743 
 744   bool is_call_to_arraycopystub() const;
 745 
 746   virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}
 747 
 748 #ifndef PRODUCT
 749   virtual void        dump_req(outputStream* st = tty, DumpConfig* dc = nullptr) const;
 750   virtual void        dump_spec(outputStream* st) const;
 751 #endif
 752 };
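
As a reading aid (assumption: call is some CallNode* in the enclosing C2 context), the result-shape queries above compose like this sketch:

    // Sketch only; not code from this patch.
    if (call->returns_pointer()) {
      // Unique CheckCastPP, the raw result projection if there are
      // several CheckCastPPs, or null if the result is unused.
      Node* cast = call->result_cast();
      if (cast != nullptr) {
        // users of 'cast' see the call's pointer result
      }
    }
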
 753 
 754 
 755 //------------------------------CallJavaNode-----------------------------------
 756 // Make a static or dynamic subroutine call node using Java calling
 757 // convention.  (The "Java" calling convention is the compiler's calling
 758 // convention, as opposed to the interpreter's or that of native C.)
 759 class CallJavaNode : public CallNode {
 760 protected:

 789   bool  override_symbolic_info() const     { return _override_symbolic_info; }
 790   void  set_arg_escape(bool f)             { _arg_escape = f; }
 791   bool  arg_escape() const                 { return _arg_escape; }
 792   void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);
 793 
 794   DEBUG_ONLY( bool validate_symbolic_info() const; )
 795 
 796 #ifndef PRODUCT
 797   virtual void  dump_spec(outputStream *st) const;
 798   virtual void  dump_compact_spec(outputStream *st) const;
 799 #endif
 800 };
 801 
 802 //------------------------------CallStaticJavaNode-----------------------------
 803 // Make a direct subroutine call using Java calling convention (for static
 804 // calls and optimized virtual calls, plus calls to wrappers for run-time
 805 // routines); generates static stub.
 806 class CallStaticJavaNode : public CallJavaNode {
 807   virtual bool cmp( const Node &n ) const;
 808   virtual uint size_of() const; // Size is bigger
 809 public:
 810   CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
 811     : CallJavaNode(tf, addr, method) {
 812     init_class_id(Class_CallStaticJava);
 813     if (C->eliminate_boxing() && (method != nullptr) && method->is_boxing_method()) {
 814       init_flags(Flag_is_macro);
 815       C->add_macro_node(this);
 816     }
 817   }
 818   CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
 819     : CallJavaNode(tf, addr, nullptr) {
 820     init_class_id(Class_CallStaticJava);
 821     // This node calls a runtime stub, which often has narrow memory effects.
 822     _adr_type = adr_type;
 823     _name = name;
 824   }
 825 
 826   // If this is an uncommon trap, return the request code, else zero.
 827   int uncommon_trap_request() const;
 828   bool is_uncommon_trap() const;
 829   static int extract_uncommon_trap_request(const Node* call);
 830 
 831   bool is_boxing_method() const {
 832     return is_macro() && (method() != nullptr) && method()->is_boxing_method();
 833   }
 834   // Late inlining modifies the JVMState, so we need to deep clone it
 835   // when the call node is cloned (because it is macro node).
 836   virtual bool needs_deep_clone_jvms(Compile* C) {

 907   }
 908   virtual int   Opcode() const;
 909   virtual bool        guaranteed_safepoint()  { return false; }
 910 #ifndef PRODUCT
 911   virtual void  dump_spec(outputStream *st) const;
 912 #endif
 913 };
 914 
 915 //------------------------------CallLeafNoFPNode-------------------------------
 916 // A CallLeafNode that does not use floating point, or that uses it in the
 917 // same manner as the generated code
 918 class CallLeafNoFPNode : public CallLeafNode {
 919 public:
 920   CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
 921                    const TypePtr* adr_type)
 922     : CallLeafNode(tf, addr, name, adr_type)
 923   {
 924     init_class_id(Class_CallLeafNoFP);
 925   }
 926   virtual int   Opcode() const;
 927 };
 928 
 929 //------------------------------CallLeafVectorNode-------------------------------
 930 // CallLeafNode, but using the vector calling convention instead.
 931 class CallLeafVectorNode : public CallLeafNode {
 932 private:
 933   uint _num_bits;
 934 protected:
 935   virtual bool cmp( const Node &n ) const;
 936   virtual uint size_of() const; // Size is bigger
 937 public:
 938   CallLeafVectorNode(const TypeFunc* tf, address addr, const char* name,
 939                    const TypePtr* adr_type, uint num_bits)
 940     : CallLeafNode(tf, addr, name, adr_type), _num_bits(num_bits)
 941   {
 942   }
 943   virtual int   Opcode() const;
 944   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
 945 };
 946 

 949 // High-level memory allocation
 950 //
 951 //  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
 952 //  get expanded into a code sequence containing a call.  Unlike other CallNodes,
 953 //  they have 2 memory projections and 2 i_o projections (which are distinguished by
 954 //  the _is_io_use flag in the projection.)  This is needed when expanding the node in
 955 //  order to differentiate the uses of the projection on the normal control path from
 956 //  those on the exception return path.
 957 //
 958 class AllocateNode : public CallNode {
 959 public:
 960   enum {
 961     // Output:
 962     RawAddress  = TypeFunc::Parms,    // the newly-allocated raw address
 963     // Inputs:
 964     AllocSize   = TypeFunc::Parms,    // size (in bytes) of the new object
 965     KlassNode,                        // type (maybe dynamic) of the obj.
 966     InitialTest,                      // slow-path test (may be constant)
 967     ALength,                          // array length (or TOP if none)
 968     ValidLengthTest,
 969     ParmLimit
 970   };
 971 
 972   static const TypeFunc* alloc_type(const Type* t) {
 973     const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
 974     fields[AllocSize]   = TypeInt::POS;
 975     fields[KlassNode]   = TypeInstPtr::NOTNULL;
 976     fields[InitialTest] = TypeInt::BOOL;
 977     fields[ALength]     = t;  // length (can be a bad length)
 978     fields[ValidLengthTest] = TypeInt::BOOL;
 979 
 980     const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);
 981 
 982     // create result type (range)
 983     fields = TypeTuple::fields(1);
 984     fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
 985 
 986     const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
 987 
 988     return TypeFunc::make(domain, range);
 989   }
 990 
 991   // Result of Escape Analysis
 992   bool _is_scalar_replaceable;
 993   bool _is_non_escaping;
 994   // True when MemBar for new is redundant with MemBar at initializer exit
 995   bool _is_allocation_MemBar_redundant;
 996 
 997   virtual uint size_of() const; // Size is bigger
 998   AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
 999                Node *size, Node *klass_node, Node *initial_test);
1000   // Expansion modifies the JVMState, so we need to deep clone it
1001   virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
1002   virtual int Opcode() const;
1003   virtual uint ideal_reg() const { return Op_RegP; }
1004   virtual bool        guaranteed_safepoint()  { return false; }
1005 
1006   // allocations do not modify their arguments
1007   virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) { return false;}
1008 
1009   // Pattern-match a possible usage of AllocateNode.
1010   // Return null if no allocation is recognized.
1011   // The operand is the pointer produced by the (possible) allocation.
1012   // It must be a projection of the Allocate or its subsequent CastPP.
1013   // (Note:  This function is defined in file graphKit.cpp, near
1014   // GraphKit::new_instance/new_array, whose output it recognizes.)
1015   // The 'ptr' may not have an offset unless the 'offset' argument is given.
1016   static AllocateNode* Ideal_allocation(Node* ptr);
1017 
1018   // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
1019   // an offset, which is reported back to the caller.

1044 
1045   // Return true if the allocation doesn't escape the thread, i.e. its
1046   // escape state is NoEscape or ArgEscape. InitializeNode._does_not_escape
1047   // is true when its allocation's escape state is NoEscape or
1048   // ArgEscape. In case the allocation's InitializeNode is null, check
1049   // the AllocateNode._is_non_escaping flag.
1050   // AllocateNode._is_non_escaping is true when its escape state is
1051   // NoEscape.
1052   bool does_not_escape_thread() {
1053     InitializeNode* init = nullptr;
1054     return _is_non_escaping || (((init = initialization()) != nullptr) && init->does_not_escape());
1055   }
1056 
1057   // If the object doesn't escape in its <.init> method and there is a memory
1058   // barrier inserted at the exit of its <.init>, the memory barrier for the new
1059   // is not necessary. Invoke this method when the MemBar at the exit of the
1060   // initializer post-dominates the allocation node.
1061   void compute_MemBar_redundancy(ciMethod* initializer);
1062   bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }
1063 
1064   Node* make_ideal_mark(PhaseGVN *phase, Node* obj, Node* control, Node* mem);
1065 
1066   NOT_PRODUCT(virtual void dump_spec(outputStream* st) const;)
1067 };
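
To illustrate the pattern-matcher declared above, a hedged sketch (ptr stands for some pointer-typed Node* and is not from this patch):

    AllocateNode* alloc = AllocateNode::Ideal_allocation(ptr);
    if (alloc != nullptr && alloc->does_not_escape_thread()) {
      // ptr is a projection (or subsequent CastPP) of a non-escaping allocation
    }
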
1068 
1069 //------------------------------AllocateArray---------------------------------
1070 //
1071 // High-level array allocation
1072 //
1073 class AllocateArrayNode : public AllocateNode {
1074 public:
1075   AllocateArrayNode(Compile* C, const TypeFunc* atype, Node* ctrl, Node* mem, Node* abio, Node* size, Node* klass_node,
1076                     Node* initial_test, Node* count_val, Node* valid_length_test)
1077     : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
1078                    initial_test)
1079   {
1080     init_class_id(Class_AllocateArray);
1081     set_req(AllocateNode::ALength,        count_val);
1082     set_req(AllocateNode::ValidLengthTest, valid_length_test);
1083   }
1084   virtual int Opcode() const;
1085 
1086   // Dig the length operand out of an array allocation site.
1087   Node* Ideal_length() {
1088     return in(AllocateNode::ALength);
1089   }
1090 
1091   // Dig the length operand out of an array allocation site and narrow the
1092   // type with a CastII, if necessary
1093   Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseValues* phase, bool can_create = true);
1094 
1095   // Pattern-match a possible usage of AllocateArrayNode.
1096   // Return null if no allocation is recognized.
1097   static AllocateArrayNode* Ideal_array_allocation(Node* ptr) {
1098     AllocateNode* allo = Ideal_allocation(ptr);
1099     return (allo == nullptr || !allo->is_AllocateArray())
1100            ? nullptr : allo->as_AllocateArray();
1101   }
1102 };
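
And the array-specific counterpart, again as an illustrative sketch (ptr is hypothetical):

    AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(ptr);
    if (alloc != nullptr) {
      Node* len = alloc->Ideal_length(); // raw ALength input, not yet narrowed
    }
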
1103 

  59 //------------------------------StartNode--------------------------------------
  60 // The method start node
  61 class StartNode : public MultiNode {
  62   virtual bool cmp( const Node &n ) const;
  63   virtual uint size_of() const; // Size is bigger
  64 public:
  65   const TypeTuple *_domain;
  66   StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
  67     init_class_id(Class_Start);
  68     init_req(0,this);
  69     init_req(1,root);
  70   }
  71   virtual int Opcode() const;
  72   virtual bool pinned() const { return true; };
  73   virtual const Type *bottom_type() const;
  74   virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  75   virtual const Type* Value(PhaseGVN* phase) const;
  76   virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  77   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  78   virtual const RegMask &in_RegMask(uint) const;
  79   virtual Node *match(const ProjNode *proj, const Matcher *m, const RegMask* mask);
  80   virtual uint ideal_reg() const { return 0; }
  81 #ifndef PRODUCT
  82   virtual void  dump_spec(outputStream *st) const;
  83   virtual void  dump_compact_spec(outputStream *st) const;
  84 #endif
  85 };
  86 
  87 //------------------------------StartOSRNode-----------------------------------
  88 // The method start node for on-stack replacement code
  89 class StartOSRNode : public StartNode {
  90 public:
  91   StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  92   virtual int   Opcode() const;
  93 };
  94 
  95 
  96 //------------------------------ParmNode---------------------------------------
  97 // Incoming parameters
  98 class ParmNode : public ProjNode {
  99   static const char * const names[TypeFunc::Parms+1];
 100 public:
 101   ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
 102     init_class_id(Class_Parm);
 103   }
 104   virtual int Opcode() const;
 105   virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
 106   virtual uint ideal_reg() const;
 107 #ifndef PRODUCT
 108   virtual void dump_spec(outputStream *st) const;
 109   virtual void dump_compact_spec(outputStream *st) const;
 110 #endif
 111 };
 112 

 633     assert(jvms != nullptr, "JVMS reference is null.");
 634     return jvms->scloff() + _merge_pointer_idx + 1;
 635   }
 636 
 637   // Assumes that "this" is an argument to a safepoint node "s", and that
 638   // "new_call" is being created to correspond to "s".  But the difference
 639   // between the start index of the jvmstates of "new_call" and "s" is
 640   // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
 641   // corresponds appropriately to "this" in "new_call".  Assumes that
 642   // "sosn_map" is a map, specific to the translation of "s" to "new_call",
 643   // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
 644   SafePointScalarMergeNode* clone(Dict* sosn_map, bool& new_node) const;
 645 
 646 #ifndef PRODUCT
 647   virtual void              dump_spec(outputStream *st) const;
 648 #endif
 649 };
 650 
 651 // Simple container for the outgoing projections of a call.  Useful
 652 // for serious surgery on calls.
 653 class CallProjections {
 654 public:
 655   Node* fallthrough_proj;
 656   Node* fallthrough_catchproj;
 657   Node* fallthrough_memproj;
 658   Node* fallthrough_ioproj;
 659   Node* catchall_catchproj;
 660   Node* catchall_memproj;
 661   Node* catchall_ioproj;
 662   Node* exobj;
 663   uint nb_resproj;
 664   Node* resproj[1]; // at least one projection
 665 
 666   CallProjections(uint nbres) {
 667     fallthrough_proj      = nullptr;
 668     fallthrough_catchproj = nullptr;
 669     fallthrough_memproj   = nullptr;
 670     fallthrough_ioproj    = nullptr;
 671     catchall_catchproj    = nullptr;
 672     catchall_memproj      = nullptr;
 673     catchall_ioproj       = nullptr;
 674     exobj                 = nullptr;
 675     nb_resproj            = nbres;
 676     resproj[0]            = nullptr;
 677     for (uint i = 1; i < nb_resproj; i++) {
 678       resproj[i]          = nullptr;
 679     }
 680   }
 681 
 682 };
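
After this change the container carries a variable number of result projections (resproj is a trailing array sized by nb_resproj, to cover inline types returned as multiple fields), so extract_projections now allocates and returns the container instead of filling a caller-provided one. A minimal sketch of the post-change usage (call is illustrative):

    CallProjections* projs = call->extract_projections(/*separate_io_proj=*/ true);
    for (uint i = 0; i < projs->nb_resproj; i++) {
      Node* res = projs->resproj[i]; // one projection per returned value/field
    }
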
 683 
 684 class CallGenerator;
 685 
 686 //------------------------------CallNode---------------------------------------
 687 // Call nodes now subsume the function of debug nodes at callsites, so they
 688 // contain the functionality of a full scope chain of debug nodes.
 689 class CallNode : public SafePointNode {
 690 
 691 protected:
 692   bool may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr* t_oop, PhaseValues* phase);
 693 
 694 public:
 695   const TypeFunc* _tf;          // Function type
 696   address         _entry_point; // Address of method being called
 697   float           _cnt;         // Estimate of number of times called
 698   CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls
 699   const char*     _name;        // Printable name, if _method is null
 700 
 701   CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type, JVMState* jvms = nullptr)
 702     : SafePointNode(tf->domain_cc()->cnt(), jvms, adr_type),
 703       _tf(tf),
 704       _entry_point(addr),
 705       _cnt(COUNT_UNKNOWN),
 706       _generator(nullptr),
 707       _name(nullptr)
 708   {
 709     init_class_id(Class_Call);
 710   }
 711 
 712   const TypeFunc* tf()         const { return _tf; }
 713   address  entry_point()       const { return _entry_point; }
 714   float    cnt()               const { return _cnt; }
 715   CallGenerator* generator()   const { return _generator; }
 716 
 717   void set_tf(const TypeFunc* tf)       { _tf = tf; }
 718   void set_entry_point(address p)       { _entry_point = p; }
 719   void set_cnt(float c)                 { _cnt = c; }
 720   void set_generator(CallGenerator* cg) { _generator = cg; }
 721 
 722   virtual const Type* bottom_type() const;
 723   virtual const Type* Value(PhaseGVN* phase) const;
 724   virtual Node* Ideal(PhaseGVN* phase, bool can_reshape);
 725   virtual Node* Identity(PhaseGVN* phase) { return this; }
 726   virtual bool        cmp(const Node &n) const;
 727   virtual uint        size_of() const = 0;
 728   virtual void        calling_convention(BasicType* sig_bt, VMRegPair* parm_regs, uint argcnt) const;
 729   virtual Node*       match(const ProjNode* proj, const Matcher* m, const RegMask* mask);
 730   virtual uint        ideal_reg() const { return NotAMachineReg; }
 731   // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
 732   // for some macro nodes whose expansion does not have a safepoint on the fast path.
 733   virtual bool        guaranteed_safepoint()  { return true; }
 734   // For macro nodes, the JVMState gets modified during expansion. If calls
 735   // use MachConstantBase, it gets modified during matching. So when cloning
 736   // the node the JVMState must be deep cloned. Default is to shallow clone.
 737   virtual bool needs_deep_clone_jvms(Compile* C) { return C->needs_deep_clone_jvms(); }
 738 
 739   // Returns true if the call may modify n
 740   virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseValues* phase);
 741   // Does this node have a use of n other than in debug information?
 742   bool                has_non_debug_use(Node* n);
 743   bool                has_debug_use(Node* n);
 744   // Returns the unique CheckCastPP of a call,
 745   // or the result projection if there are several CheckCastPPs,
 746   // or null if there is none.
 747   Node* result_cast();
 748   // Does this node return a pointer?
 749   bool returns_pointer() const {
 750     const TypeTuple* r = tf()->range_sig();
 751     return (!tf()->returns_inline_type_as_fields() &&
 752             r->cnt() > TypeFunc::Parms &&
 753             r->field_at(TypeFunc::Parms)->isa_ptr());
 754   }
 755 
 756   // Collect all the interesting edges from a call for use in
 757   // replacing the call by something else.  Used by macro expansion
 758   // and the late inlining support.
 759   CallProjections* extract_projections(bool separate_io_proj, bool do_asserts = true);
 760 
 761   virtual uint match_edge(uint idx) const;
 762 
 763   bool is_call_to_arraycopystub() const;
 764 
 765   virtual void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode* sfpt) {}
 766 
 767 #ifndef PRODUCT
 768   virtual void        dump_req(outputStream* st = tty, DumpConfig* dc = nullptr) const;
 769   virtual void        dump_spec(outputStream* st) const;
 770 #endif
 771 };
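
A reading aid for the reworked returns_pointer() above: a call whose inline-type result is scalarized into fields reports no single pointer result; its results are instead reached through the multi-projection CallProjections shown earlier. Sketch only (branch bodies are illustrative):

    if (call->tf()->returns_inline_type_as_fields()) {
      // results live in projs->resproj[0 .. nb_resproj-1]
    } else if (call->returns_pointer()) {
      Node* cast = call->result_cast(); // unique CheckCastPP or null
    }
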
 772 
 773 
 774 //------------------------------CallJavaNode-----------------------------------
 775 // Make a static or dynamic subroutine call node using Java calling
 776 // convention.  (The "Java" calling convention is the compiler's calling
 777 // convention, as opposed to the interpreter's or that of native C.)
 778 class CallJavaNode : public CallNode {
 779 protected:

 808   bool  override_symbolic_info() const     { return _override_symbolic_info; }
 809   void  set_arg_escape(bool f)             { _arg_escape = f; }
 810   bool  arg_escape() const                 { return _arg_escape; }
 811   void copy_call_debug_info(PhaseIterGVN* phase, SafePointNode *sfpt);
 812 
 813   DEBUG_ONLY( bool validate_symbolic_info() const; )
 814 
 815 #ifndef PRODUCT
 816   virtual void  dump_spec(outputStream *st) const;
 817   virtual void  dump_compact_spec(outputStream *st) const;
 818 #endif
 819 };
 820 
 821 //------------------------------CallStaticJavaNode-----------------------------
 822 // Make a direct subroutine call using Java calling convention (for static
 823 // calls and optimized virtual calls, plus calls to wrappers for run-time
 824 // routines); generates static stub.
 825 class CallStaticJavaNode : public CallJavaNode {
 826   virtual bool cmp( const Node &n ) const;
 827   virtual uint size_of() const; // Size is bigger
 828 
 829   bool remove_unknown_flat_array_load(PhaseIterGVN* igvn, Node* ctl, Node* mem, Node* unc_arg);
 830 
 831 public:
 832   CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method)
 833     : CallJavaNode(tf, addr, method) {
 834     init_class_id(Class_CallStaticJava);
 835     if (C->eliminate_boxing() && (method != nullptr) && method->is_boxing_method()) {
 836       init_flags(Flag_is_macro);
 837       C->add_macro_node(this);
 838     }
 839     const TypeTuple *r = tf->range_sig();
 840     if (InlineTypeReturnedAsFields &&
 841         method != nullptr &&
 842         method->is_method_handle_intrinsic() &&
 843         r->cnt() > TypeFunc::Parms &&
 844         r->field_at(TypeFunc::Parms)->isa_oopptr() &&
 845         r->field_at(TypeFunc::Parms)->is_oopptr()->can_be_inline_type()) {
 846       // Make sure this call is processed by PhaseMacroExpand::expand_mh_intrinsic_return
 847       init_flags(Flag_is_macro);
 848       C->add_macro_node(this);
 849     }
 850   }
 851   CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, const TypePtr* adr_type)
 852     : CallJavaNode(tf, addr, nullptr) {
 853     init_class_id(Class_CallStaticJava);
 854     // This node calls a runtime stub, which often has narrow memory effects.
 855     _adr_type = adr_type;
 856     _name = name;
 857   }
 858 
 859   // If this is an uncommon trap, return the request code, else zero.
 860   int uncommon_trap_request() const;
 861   bool is_uncommon_trap() const;
 862   static int extract_uncommon_trap_request(const Node* call);
 863 
 864   bool is_boxing_method() const {
 865     return is_macro() && (method() != nullptr) && method()->is_boxing_method();
 866   }
 867   // Late inlining modifies the JVMState, so we need to deep clone it
 868   // when the call node is cloned (because it is macro node).
 869   virtual bool needs_deep_clone_jvms(Compile* C) {

 940   }
 941   virtual int   Opcode() const;
 942   virtual bool        guaranteed_safepoint()  { return false; }
 943 #ifndef PRODUCT
 944   virtual void  dump_spec(outputStream *st) const;
 945 #endif
 946 };
 947 
 948 //------------------------------CallLeafNoFPNode-------------------------------
 949 // A CallLeafNode that does not use floating point, or that uses it in the
 950 // same manner as the generated code
 951 class CallLeafNoFPNode : public CallLeafNode {
 952 public:
 953   CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
 954                    const TypePtr* adr_type)
 955     : CallLeafNode(tf, addr, name, adr_type)
 956   {
 957     init_class_id(Class_CallLeafNoFP);
 958   }
 959   virtual int   Opcode() const;
 960   virtual uint match_edge(uint idx) const;
 961 };
 962 
 963 //------------------------------CallLeafVectorNode-------------------------------
 964 // CallLeafNode, but using the vector calling convention instead.
 965 class CallLeafVectorNode : public CallLeafNode {
 966 private:
 967   uint _num_bits;
 968 protected:
 969   virtual bool cmp( const Node &n ) const;
 970   virtual uint size_of() const; // Size is bigger
 971 public:
 972   CallLeafVectorNode(const TypeFunc* tf, address addr, const char* name,
 973                    const TypePtr* adr_type, uint num_bits)
 974     : CallLeafNode(tf, addr, name, adr_type), _num_bits(num_bits)
 975   {
 976   }
 977   virtual int   Opcode() const;
 978   virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
 979 };
 980 

 983 // High-level memory allocation
 984 //
 985 //  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
 986 //  get expanded into a code sequence containing a call.  Unlike other CallNodes,
 987 //  they have 2 memory projections and 2 i_o projections (which are distinguished by
 988 //  the _is_io_use flag in the projection.)  This is needed when expanding the node in
 989 //  order to differentiate the uses of the projection on the normal control path from
 990 //  those on the exception return path.
 991 //
 992 class AllocateNode : public CallNode {
 993 public:
 994   enum {
 995     // Output:
 996     RawAddress  = TypeFunc::Parms,    // the newly-allocated raw address
 997     // Inputs:
 998     AllocSize   = TypeFunc::Parms,    // size (in bytes) of the new object
 999     KlassNode,                        // type (maybe dynamic) of the obj.
1000     InitialTest,                      // slow-path test (may be constant)
1001     ALength,                          // array length (or TOP if none)
1002     ValidLengthTest,
1003     InlineType,                       // InlineTypeNode if this is an inline type allocation
1004     InitValue,                        // Init value for null-free inline type arrays
1005     RawInitValue,                     // Same as above but as raw machine word
1006     ParmLimit
1007   };
1008 
1009   static const TypeFunc* alloc_type(const Type* t) {
1010     const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
1011     fields[AllocSize]   = TypeInt::POS;
1012     fields[KlassNode]   = TypeInstPtr::NOTNULL;
1013     fields[InitialTest] = TypeInt::BOOL;
1014     fields[ALength]     = t;  // length (can be a bad length)
1015     fields[ValidLengthTest] = TypeInt::BOOL;
1016     fields[InlineType] = Type::BOTTOM;
1017     fields[InitValue] = TypeInstPtr::NOTNULL;
1018     fields[RawInitValue] = TypeX_X;
1019 
1020     const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);
1021 
1022     // create result type (range)
1023     fields = TypeTuple::fields(1);
1024     fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop
1025 
1026     const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);
1027 
1028     return TypeFunc::make(domain, range);
1029   }
1030 
1031   // Result of Escape Analysis
1032   bool _is_scalar_replaceable;
1033   bool _is_non_escaping;
1034   // True when MemBar for new is redundant with MemBar at initializer exit
1035   bool _is_allocation_MemBar_redundant;
1036   bool _larval;
1037 
1038   virtual uint size_of() const; // Size is bigger
1039   AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
1040                Node *size, Node *klass_node, Node *initial_test,
1041                InlineTypeNode* inline_type_node = nullptr);
1042   // Expansion modifies the JVMState, so we need to deep clone it
1043   virtual bool needs_deep_clone_jvms(Compile* C) { return true; }
1044   virtual int Opcode() const;
1045   virtual uint ideal_reg() const { return Op_RegP; }
1046   virtual bool        guaranteed_safepoint()  { return false; }
1047 
1048   // allocations do not modify their arguments
1049   virtual bool        may_modify(const TypeOopPtr* t_oop, PhaseValues* phase) { return false;}
1050 
1051   // Pattern-match a possible usage of AllocateNode.
1052   // Return null if no allocation is recognized.
1053   // The operand is the pointer produced by the (possible) allocation.
1054   // It must be a projection of the Allocate or its subsequent CastPP.
1055   // (Note:  This function is defined in file graphKit.cpp, near
1056   // GraphKit::new_instance/new_array, whose output it recognizes.)
1057   // The 'ptr' may not have an offset unless the 'offset' argument is given.
1058   static AllocateNode* Ideal_allocation(Node* ptr);
1059 
1060   // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
1061   // an offset, which is reported back to the caller.

1086 
1087   // Return true if the allocation doesn't escape the thread, i.e. its
1088   // escape state is NoEscape or ArgEscape. InitializeNode._does_not_escape
1089   // is true when its allocation's escape state is NoEscape or
1090   // ArgEscape. In case the allocation's InitializeNode is null, check
1091   // the AllocateNode._is_non_escaping flag.
1092   // AllocateNode._is_non_escaping is true when its escape state is
1093   // NoEscape.
1094   bool does_not_escape_thread() {
1095     InitializeNode* init = nullptr;
1096     return _is_non_escaping || (((init = initialization()) != nullptr) && init->does_not_escape());
1097   }
1098 
1099   // If the object doesn't escape in its <.init> method and there is a memory
1100   // barrier inserted at the exit of its <.init>, the memory barrier for the new
1101   // is not necessary. Invoke this method when the MemBar at the exit of the
1102   // initializer post-dominates the allocation node.
1103   void compute_MemBar_redundancy(ciMethod* initializer);
1104   bool is_allocation_MemBar_redundant() { return _is_allocation_MemBar_redundant; }
1105 
1106   Node* make_ideal_mark(PhaseGVN* phase, Node* control, Node* mem);
1107 
1108   NOT_PRODUCT(virtual void dump_spec(outputStream* st) const;)
1109 };
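
The new InitValue/RawInitValue inputs read back like any other edge; a hedged sketch (alloc is an illustrative AllocateNode*):

    // Fill value for a null-free inline type array, per the enum above.
    Node* init_val = alloc->in(AllocateNode::InitValue);
    Node* raw_init = alloc->in(AllocateNode::RawInitValue); // same value as raw machine word
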
1110 
1111 //------------------------------AllocateArray---------------------------------
1112 //
1113 // High-level array allocation
1114 //
1115 class AllocateArrayNode : public AllocateNode {
1116 public:
1117   AllocateArrayNode(Compile* C, const TypeFunc* atype, Node* ctrl, Node* mem, Node* abio, Node* size, Node* klass_node,
1118                     Node* initial_test, Node* count_val, Node* valid_length_test,
1119                     Node* init_value, Node* raw_init_value)
1120     : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
1121                    initial_test)
1122   {
1123     init_class_id(Class_AllocateArray);
1124     set_req(AllocateNode::ALength, count_val);
1125     set_req(AllocateNode::ValidLengthTest, valid_length_test);
1126     init_req(AllocateNode::InitValue, init_value);
1127     init_req(AllocateNode::RawInitValue, raw_init_value);
1128   }
1129   virtual uint size_of() const { return sizeof(*this); }
1130   virtual int Opcode() const;
1131 
1132   // Dig the length operand out of an array allocation site.
1133   Node* Ideal_length() {
1134     return in(AllocateNode::ALength);
1135   }
1136 
1137   // Dig the length operand out of an array allocation site and narrow the
1138   // type with a CastII, if necessary
1139   Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseValues* phase, bool can_create = true);
1140 
1141   // Pattern-match a possible usage of AllocateArrayNode.
1142   // Return null if no allocation is recognized.
1143   static AllocateArrayNode* Ideal_array_allocation(Node* ptr) {
1144     AllocateNode* allo = Ideal_allocation(ptr);
1145     return (allo == nullptr || !allo->is_AllocateArray())
1146            ? nullptr : allo->as_AllocateArray();
1147   }
1148 };
1149 