  return progress;
}

//------------------------------Value-----------------------------------------
const Type* MulNode::Value(PhaseGVN* phase) const {
  const Type *t1 = phase->type( in(1) );
  const Type *t2 = phase->type( in(2) );
  // Either input is TOP ==> the result is TOP
  if( t1 == Type::TOP ) return Type::TOP;
  if( t2 == Type::TOP ) return Type::TOP;

  // Either input is ZERO ==> the result is ZERO.
  // Not valid for floats or doubles: a zero input does not force a zero
  // result (e.g. 0.0 * infinity is NaN, and +0.0 * -1.0 is -0.0).
  int op = Opcode();
  if( op == Op_MulI || op == Op_AndI || op == Op_MulL || op == Op_AndL ) {
    const Type *zero = add_id();        // The multiplicative zero
    if( t1->higher_equal( zero ) ) return zero;
    if( t2->higher_equal( zero ) ) return zero;
  }

  // Code pattern on return from a call that returns an __Value. Can
  // be optimized away if the return value turns out to be an oop.
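  // (An oop is aligned to at least MinObjAlignmentInBytes, so the bits selected
  // by a mask no larger than MinObjAlignmentInBytesMask are all zero in the
  // CastP2X'd address, and the AND is statically zero.)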
  if (op == Op_AndX &&
      in(1) != nullptr &&
      in(1)->Opcode() == Op_CastP2X &&
      in(1)->in(1) != nullptr &&
      phase->type(in(1)->in(1))->isa_oopptr() &&
      t2->isa_intptr_t()->_lo >= 0 &&
      t2->isa_intptr_t()->_hi <= MinObjAlignmentInBytesMask) {
    return add_id();
  }

  // Either input is BOTTOM ==> the result is the local BOTTOM
  if( t1 == Type::BOTTOM || t2 == Type::BOTTOM )
    return bottom_type();

#if defined(IA32)
  // Can't trust native compilers to properly fold strict double
  // multiplication with round-to-zero on this platform.
  if (op == Op_MulD) {
    return TypeD::DOUBLE;
  }
#endif

  return mul_ring(t1,t2);            // Local flavor of type multiplication
}

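// Factory: build the integer multiply node that matches the given basic type.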
MulNode* MulNode::make(Node* in1, Node* in2, BasicType bt) {
  switch (bt) {
    case T_INT:
      return new MulINode(in1, in2);
    case T_LONG:

// ... [intervening code elided] ...

    return new ConvI2LNode(andi);
  }

  // Masking off sign bits? Don't make them!
  if (op == Op_RShiftL) {
    const TypeInt* t12 = phase->type(in1->in(2))->isa_int();
    if( t12 && t12->is_con() ) { // Shift is by a constant
      int shift = t12->get_con();
      shift &= BitsPerJavaLong - 1;  // semantics of Java shifts
      const julong sign_bits_mask = ~(((julong)CONST64(1) << (julong)(BitsPerJavaLong - shift)) -1);
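      // (Illustration: with shift == 16, sign_bits_mask == 0xFFFF000000000000,
      //  i.e. exactly the 16 high-order bit positions that an arithmetic right
      //  shift fills with copies of the sign bit.)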
      // If the two masks share no bits, only the originally shifted bits can
      // survive the AND: no sign-extension bits make it through.
      if( (sign_bits_mask & mask) == 0 ) {
        // Use a zero-fill (logical) shift instead.
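        // (e.g. (x >> 16) & 0xFFFF gives the same value as (x >>> 16) & 0xFFFF,
        //  because the mask discards every bit the arithmetic shift could have
        //  sign-extended.)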
        Node *zshift = phase->transform(new URShiftLNode(in1->in(1), in1->in(2)));
        return new AndLNode(zshift, in(2));
      }
    }
  }

  // Search for GraphKit::mark_word_test patterns and fold the test if the result is statically known
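  // The pattern is (AndL (LoadL mark_word) mask), with the load possibly merged
  // through a Phi with a load of the klass' prototype header. If the address
  // type already decides the property being tested, replace the load input with
  // a constant: the mask itself (test statically true) or zero (statically
  // false), so the AND folds.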
  Node* load1 = in(1);
  Node* load2 = nullptr;
  if (load1->is_Phi() && phase->type(load1)->isa_long()) {
    load1 = in(1)->in(1);
    load2 = in(1)->in(2);
  }
  if (load1 != nullptr && load1->is_Load() && phase->type(load1)->isa_long() &&
      (load2 == nullptr || (load2->is_Load() && phase->type(load2)->isa_long()))) {
    const TypePtr* adr_t1 = phase->type(load1->in(MemNode::Address))->isa_ptr();
    const TypePtr* adr_t2 = (load2 != nullptr) ? phase->type(load2->in(MemNode::Address))->isa_ptr() : nullptr;
    if (adr_t1 != nullptr && adr_t1->offset() == oopDesc::mark_offset_in_bytes() &&
        (load2 == nullptr || (adr_t2 != nullptr && adr_t2->offset() == in_bytes(Klass::prototype_header_offset())))) {
      if (mask == markWord::inline_type_pattern) {
        if (adr_t1->is_inlinetypeptr()) {
          set_req_X(1, in(2), phase);
          return this;
        } else if (!adr_t1->can_be_inline_type()) {
          set_req_X(1, phase->longcon(0), phase);
          return this;
        }
      } else if (mask == markWord::null_free_array_bit_in_place) {
        if (adr_t1->is_null_free()) {
          set_req_X(1, in(2), phase);
          return this;
        } else if (adr_t1->is_not_null_free()) {
          set_req_X(1, phase->longcon(0), phase);
          return this;
        }
      } else if (mask == markWord::flat_array_bit_in_place) {
        if (adr_t1->is_flat()) {
          set_req_X(1, in(2), phase);
          return this;
        } else if (adr_t1->is_not_flat()) {
          set_req_X(1, phase->longcon(0), phase);
          return this;
        }
      }
    }
  }

  return MulNode::Ideal(phase, can_reshape);
}

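// Factory: build the left-shift node that matches the given basic type.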
LShiftNode* LShiftNode::make(Node* in1, Node* in2, BasicType bt) {
  switch (bt) {
    case T_INT:
      return new LShiftINode(in1, in2);
    case T_LONG:
      return new LShiftLNode(in1, in2);
    default:
      fatal("Not implemented for %s", type2name(bt));
  }
  return nullptr;
}

//=============================================================================

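// Helper: reports whether the shift count (input 2 of shiftNode) is a
// compile-time integer constant, returning it through *count.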
static bool const_shift_count(PhaseGVN* phase, Node* shiftNode, int* count) {
  const TypeInt* tcount = phase->type(shiftNode->in(2))->isa_int();
  if (tcount != nullptr && tcount->is_con()) {