diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 031ba26..0000000 --- a/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/v8-3.14.5.10.tar.bz2 diff --git a/0002_mips.patch b/0002_mips.patch deleted file mode 100644 index c477717..0000000 --- a/0002_mips.patch +++ /dev/null @@ -1,935 +0,0 @@ -Description: mips arch support backported to v8 3.14 branch -Origin: https://github.com/paul99/v8m-rb/tree/dm-mipsbe-3.14 -Last-Update: 2014-04-09 -Acked-by: Jérémy Lal - ---- a/Makefile -+++ b/Makefile -@@ -133,7 +133,7 @@ - - # Architectures and modes to be compiled. Consider these to be internal - # variables, don't override them (use the targets instead). --ARCHES = ia32 x64 arm mipsel -+ARCHES = ia32 x64 arm mipsel mips - DEFAULT_ARCHES = ia32 x64 arm - MODES = release debug - ANDROID_ARCHES = android_ia32 android_arm -@@ -168,10 +168,6 @@ - $(MAKE) -C "$(OUTDIR)" BUILDTYPE=$(BUILDTYPE) \ - builddir="$(abspath $(OUTDIR))/$(BUILDTYPE)" - --mips mips.release mips.debug: -- @echo "V8 does not support big-endian MIPS builds at the moment," \ -- "please use little-endian builds (mipsel)." -- - # Compile targets. MODES and ARCHES are convenience targets. - .SECONDEXPANSION: - $(MODES): $(addsuffix .$$@,$(DEFAULT_ARCHES)) ---- a/build/common.gypi -+++ b/build/common.gypi -@@ -176,7 +176,7 @@ - 'V8_TARGET_ARCH_IA32', - ], - }], # v8_target_arch=="ia32" -- ['v8_target_arch=="mipsel"', { -+ ['v8_target_arch=="mipsel" or v8_target_arch=="mips"', { - 'defines': [ - 'V8_TARGET_ARCH_MIPS', - ], -@@ -187,12 +187,17 @@ - ['mipscompiler=="yes"', { - 'target_conditions': [ - ['_toolset=="target"', { -- 'cflags': ['-EL'], -- 'ldflags': ['-EL'], - 'conditions': [ -+ ['v8_target_arch=="mipsel"', { -+ 'cflags': ['-EL'], -+ 'ldflags': ['-EL'], -+ }], -+ ['v8_target_arch=="mips"', { -+ 'cflags': ['-EB'], -+ 'ldflags': ['-EB'], -+ }], - [ 'v8_use_mips_abi_hardfloat=="true"', { - 'cflags': ['-mhard-float'], -- 'ldflags': ['-mhard-float'], - }, { - 'cflags': ['-msoft-float'], - 'ldflags': ['-msoft-float'], -@@ -202,7 +207,8 @@ - }], - ['mips_arch_variant=="loongson"', { - 'cflags': ['-mips3', '-Wa,-mips3'], -- }, { -+ }], -+ ['mips_arch_variant=="mips32r1"', { - 'cflags': ['-mips32', '-Wa,-mips32'], - }], - ], -@@ -290,7 +296,7 @@ - ['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \ - or OS=="netbsd" or OS=="mac" or OS=="android") and \ - (v8_target_arch=="arm" or v8_target_arch=="ia32" or \ -- v8_target_arch=="mipsel")', { -+ v8_target_arch=="mipsel" or v8_target_arch=="mips")', { - # Check whether the host compiler and target compiler support the - # '-m32' option and set it if so. - 'target_conditions': [ ---- a/build/standalone.gypi -+++ b/build/standalone.gypi -@@ -68,6 +68,7 @@ - 'conditions': [ - ['(v8_target_arch=="arm" and host_arch!="arm") or \ - (v8_target_arch=="mipsel" and host_arch!="mipsel") or \ -+ (v8_target_arch=="mips" and host_arch!="mips") or \ - (v8_target_arch=="x64" and host_arch!="x64") or \ - (OS=="android")', { - 'want_separate_host_toolset': 1, ---- a/src/conversions-inl.h -+++ b/src/conversions-inl.h -@@ -75,7 +75,11 @@ - if (x < k2Pow52) { - x += k2Pow52; - uint32_t result; -+#ifndef BIG_ENDIAN_FLOATING_POINT - Address mantissa_ptr = reinterpret_cast
<Address>(&x); -+#else -+ Address mantissa_ptr = reinterpret_cast<Address>
(&x) + 4; -+#endif - // Copy least significant 32 bits of mantissa. - memcpy(&result, mantissa_ptr, sizeof(result)); - return negative ? ~result + 1 : result; ---- a/src/globals.h -+++ b/src/globals.h -@@ -83,7 +83,7 @@ - #if CAN_USE_UNALIGNED_ACCESSES - #define V8_HOST_CAN_READ_UNALIGNED 1 - #endif --#elif defined(__MIPSEL__) -+#elif defined(__MIPSEL__) || defined(__MIPSEB__) - #define V8_HOST_ARCH_MIPS 1 - #define V8_HOST_ARCH_32_BIT 1 - #else -@@ -101,13 +101,17 @@ - #define V8_TARGET_ARCH_IA32 1 - #elif defined(__ARMEL__) - #define V8_TARGET_ARCH_ARM 1 --#elif defined(__MIPSEL__) -+#elif defined(__MIPSEL__) || defined(__MIPSEB__) - #define V8_TARGET_ARCH_MIPS 1 - #else - #error Target architecture was not detected as supported by v8 - #endif - #endif - -+#if defined(__MIPSEB__) -+#define BIG_ENDIAN_FLOATING_POINT 1 -+#endif -+ - // Check for supported combinations of host and target architectures. - #if defined(V8_TARGET_ARCH_IA32) && !defined(V8_HOST_ARCH_IA32) - #error Target architecture ia32 is only supported on ia32 host ---- a/src/mips/assembler-mips.cc -+++ b/src/mips/assembler-mips.cc -@@ -1631,10 +1631,17 @@ - void Assembler::ldc1(FPURegister fd, const MemOperand& src) { - // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit - // load to two 32-bit loads. -+#ifndef BIG_ENDIAN_FLOATING_POINT - GenInstrImmediate(LWC1, src.rm(), fd, src.offset_); - FPURegister nextfpreg; - nextfpreg.setcode(fd.code() + 1); - GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ + 4); -+#else -+ GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ + 4); -+ FPURegister nextfpreg; -+ nextfpreg.setcode(fd.code() + 1); -+ GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_); -+#endif - } - - -@@ -1646,10 +1653,17 @@ - void Assembler::sdc1(FPURegister fd, const MemOperand& src) { - // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit - // store to two 32-bit stores. -+#ifndef BIG_ENDIAN_FLOATING_POINT - GenInstrImmediate(SWC1, src.rm(), fd, src.offset_); - FPURegister nextfpreg; - nextfpreg.setcode(fd.code() + 1); - GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ + 4); -+#else -+ GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ + 4); -+ FPURegister nextfpreg; -+ nextfpreg.setcode(fd.code() + 1); -+ GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ ); -+#endif - } - - ---- a/src/mips/assembler-mips.h -+++ b/src/mips/assembler-mips.h -@@ -74,6 +74,13 @@ - static const int kNumRegisters = v8::internal::kNumRegisters; - static const int kNumAllocatableRegisters = 14; // v0 through t7. - static const int kSizeInBytes = 4; -+#if __BYTE_ORDER == __LITTLE_ENDIAN -+ static const int kMantissaOffset = 0; -+ static const int kExponentOffset = 4; -+#else -+ static const int kMantissaOffset = 4; -+ static const int kExponentOffset = 0; -+#endif - - static int ToAllocationIndex(Register reg) { - return reg.code() - 2; // zero_reg and 'at' are skipped. 
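For reference, a minimal standalone sketch (not part of the deleted patch; plain C++ using only the standard library) of the word-order property that the kMantissaOffset/kExponentOffset constants just above encode. An IEEE-754 double occupies two 32-bit words, and which word holds the mantissa half depends on byte order, which is also why the ldc1/sdc1 hunks split each 64-bit access into two 32-bit accesses with mirrored offsets:

#include <cinttypes>
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  // 1.0 is 0x3FF0000000000000: the high word holds the sign/exponent bits,
  // the low word holds the low half of the mantissa.
  double d = 1.0;
  uint32_t words[2];
  std::memcpy(words, &d, sizeof(d));
  // Little-endian: words[0] == 0x00000000 (mantissa word first),
  //                words[1] == 0x3ff00000 (exponent word second).
  // Big-endian: the two words swap, exactly the offset flip above.
  std::printf("word0=%08" PRIx32 " word1=%08" PRIx32 "\n", words[0], words[1]);
  return 0;
}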
---- a/src/mips/builtins-mips.cc -+++ b/src/mips/builtins-mips.cc -@@ -869,9 +869,7 @@ - ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize); - __ LoadRoot(t7, Heap::kUndefinedValueRootIndex); - if (count_constructions) { -- __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset)); -- __ Ext(a0, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte, -- kBitsPerByte); -+ __ lbu(a0, FieldMemOperand(a2, Map::kPreAllocatedPropertyFieldsOffset)); - __ sll(t0, a0, kPointerSizeLog2); - __ addu(a0, t5, t0); - // a0: offset of first field after pre-allocated fields -@@ -899,14 +897,12 @@ - __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset)); - // The field instance sizes contains both pre-allocated property fields - // and in-object properties. -- __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset)); -- __ Ext(t6, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte, -- kBitsPerByte); -+ __ lbu(t6, FieldMemOperand(a2, Map::kPreAllocatedPropertyFieldsOffset)); - __ Addu(a3, a3, Operand(t6)); -- __ Ext(t6, a0, Map::kInObjectPropertiesByte * kBitsPerByte, -- kBitsPerByte); -+ __ lbu(t6, FieldMemOperand(a2, Map::kInObjectPropertiesOffset)); - __ subu(a3, a3, t6); - -+ - // Done if no extra properties are to be allocated. - __ Branch(&allocated, eq, a3, Operand(zero_reg)); - __ Assert(greater_equal, "Property allocation count failed.", ---- a/src/mips/code-stubs-mips.cc -+++ b/src/mips/code-stubs-mips.cc -@@ -536,13 +536,8 @@ - - - void ConvertToDoubleStub::Generate(MacroAssembler* masm) { --#ifndef BIG_ENDIAN_FLOATING_POINT - Register exponent = result1_; - Register mantissa = result2_; --#else -- Register exponent = result2_; -- Register mantissa = result1_; --#endif - Label not_special; - // Convert from Smi to integer. - __ sra(source_, source_, kSmiTagSize); -@@ -679,9 +674,8 @@ - } else { - ASSERT(destination == kCoreRegisters); - // Load the double from heap number to dst1 and dst2 in double format. -- __ lw(dst1, FieldMemOperand(object, HeapNumber::kValueOffset)); -- __ lw(dst2, FieldMemOperand(object, -- HeapNumber::kValueOffset + kPointerSize)); -+ __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset)); -+ __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset)); - } - __ Branch(&done); - -@@ -1075,6 +1069,11 @@ - // a0-a3 registers to f12/f14 register pairs. - __ Move(f12, a0, a1); - __ Move(f14, a2, a3); -+ } else { -+#ifdef BIG_ENDIAN_FLOATING_POINT -+ __ Swap(a0, a1); -+ __ Swap(a2, a3); -+#endif - } - { - AllowExternalCallThatCantCauseGC scope(masm); -@@ -1088,8 +1087,13 @@ - __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); - } else { - // Double returned in registers v0 and v1. -+#ifndef BIG_ENDIAN_FLOATING_POINT - __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset)); - __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset)); -+#else -+ __ sw(v0, FieldMemOperand(heap_number_result, HeapNumber::kExponentOffset)); -+ __ sw(v1, FieldMemOperand(heap_number_result, HeapNumber::kMantissaOffset)); -+#endif - } - // Place heap_number_result in v0 and return to the pushed return address. - __ pop(ra); -@@ -1320,8 +1324,8 @@ - __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset)); - } else { - // Load lhs to a double in a2, a3. 
-- __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4)); -- __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset)); -+ __ lw(a3, FieldMemOperand(lhs, HeapNumber::kExponentOffset)); -+ __ lw(a2, FieldMemOperand(lhs, HeapNumber::kMantissaOffset)); - - // Write Smi from rhs to a1 and a0 in double format. t5 is scratch. - __ mov(t6, rhs); -@@ -1366,11 +1370,11 @@ - __ pop(ra); - // Load rhs to a double in a1, a0. - if (rhs.is(a0)) { -- __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4)); -- __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset)); -+ __ lw(a1, FieldMemOperand(rhs, HeapNumber::kExponentOffset)); -+ __ lw(a0, FieldMemOperand(rhs, HeapNumber::kMantissaOffset)); - } else { -- __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset)); -- __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4)); -+ __ lw(a0, FieldMemOperand(rhs, HeapNumber::kMantissaOffset)); -+ __ lw(a1, FieldMemOperand(rhs, HeapNumber::kExponentOffset)); - } - } - // Fall through to both_loaded_as_doubles. -@@ -1378,7 +1382,6 @@ - - - void EmitNanCheck(MacroAssembler* masm, Condition cc) { -- bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); - if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); - // Lhs and rhs are already loaded to f12 and f14 register pairs. -@@ -1391,10 +1394,10 @@ - __ mov(t2, a2); // a2 has LS 32 bits of lhs. - __ mov(t3, a3); // a3 has MS 32 bits of lhs. - } -- Register rhs_exponent = exp_first ? t0 : t1; -- Register lhs_exponent = exp_first ? t2 : t3; -- Register rhs_mantissa = exp_first ? t1 : t0; -- Register lhs_mantissa = exp_first ? t3 : t2; -+ Register rhs_exponent = t1; -+ Register lhs_exponent = t3; -+ Register rhs_mantissa = t0; -+ Register lhs_mantissa = t2; - Label one_is_nan, neither_is_nan; - Label lhs_not_nan_exp_mask_is_loaded; - -@@ -1445,7 +1448,6 @@ - if (cc == eq) { - // Doubles are not equal unless they have the same bit pattern. - // Exception: 0 and -0. -- bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); - if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); - // Lhs and rhs are already loaded to f12 and f14 register pairs. -@@ -1458,10 +1460,10 @@ - __ mov(t2, a2); // a2 has LS 32 bits of lhs. - __ mov(t3, a3); // a3 has MS 32 bits of lhs. - } -- Register rhs_exponent = exp_first ? t0 : t1; -- Register lhs_exponent = exp_first ? t2 : t3; -- Register rhs_mantissa = exp_first ? t1 : t0; -- Register lhs_mantissa = exp_first ? t3 : t2; -+ Register rhs_exponent = t1; -+ Register lhs_exponent = t3; -+ Register rhs_mantissa = t0; -+ Register lhs_mantissa = t2; - - __ xor_(v0, rhs_mantissa, lhs_mantissa); - __ Branch(&return_result_not_equal, ne, v0, Operand(zero_reg)); -@@ -1495,6 +1497,11 @@ - // a0-a3 registers to f12/f14 register pairs. 
- __ Move(f12, a0, a1); - __ Move(f14, a2, a3); -+ } else { -+#ifdef BIG_ENDIAN_FLOATING_POINT -+ __ Swap(a0, a1); -+ __ Swap(a2, a3); -+#endif - } - - AllowExternalCallThatCantCauseGC scope(masm); -@@ -1582,14 +1589,14 @@ - __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset)); - __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset)); - } else { -- __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset)); -- __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4)); -+ __ lw(a2, FieldMemOperand(lhs, HeapNumber::kMantissaOffset)); -+ __ lw(a3, FieldMemOperand(lhs, HeapNumber::kExponentOffset)); - if (rhs.is(a0)) { -- __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4)); -- __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset)); -+ __ lw(a1, FieldMemOperand(rhs, HeapNumber::kExponentOffset)); -+ __ lw(a0, FieldMemOperand(rhs, HeapNumber::kMantissaOffset)); - } else { -- __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset)); -- __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4)); -+ __ lw(a0, FieldMemOperand(rhs, HeapNumber::kMantissaOffset)); -+ __ lw(a1, FieldMemOperand(rhs, HeapNumber::kExponentOffset)); - } - } - __ jmp(both_loaded_as_doubles); -@@ -5902,14 +5909,18 @@ - __ Branch(&simple_loop, eq, scratch4, Operand(zero_reg)); - - // Loop for src/dst that are not aligned the same way. -- // This loop uses lwl and lwr instructions. These instructions -- // depend on the endianness, and the implementation assumes little-endian. - { - Label loop; - __ bind(&loop); -+#if __BYTE_ORDER == __BIG_ENDIAN -+ __ lwl(scratch1, MemOperand(src)); -+ __ Addu(src, src, Operand(kReadAlignment)); -+ __ lwr(scratch1, MemOperand(src, -1)); -+#else - __ lwr(scratch1, MemOperand(src)); - __ Addu(src, src, Operand(kReadAlignment)); - __ lwl(scratch1, MemOperand(src, -1)); -+#endif - __ sw(scratch1, MemOperand(dest)); - __ Addu(dest, dest, Operand(kReadAlignment)); - __ Subu(scratch2, limit, dest); -@@ -6616,6 +6627,11 @@ - // in a little endian mode). - __ li(t2, Operand(2)); - __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime); -+#if __BYTE_ORDER == __BIG_ENDIAN -+ __ sll(t0, a2, 8); -+ __ srl(t1, a2, 8); -+ __ or_(a2, t0, t1); -+#endif - __ sh(a2, FieldMemOperand(v0, SeqAsciiString::kHeaderSize)); - __ IncrementCounter(counters->string_add_native(), 1, a2, a3); - __ DropAndRet(2); ---- a/src/mips/codegen-mips.cc -+++ b/src/mips/codegen-mips.cc -@@ -210,8 +210,8 @@ - a1, - t7, - f0); -- __ sw(a0, MemOperand(t3)); // mantissa -- __ sw(a1, MemOperand(t3, kIntSize)); // exponent -+ __ sw(a0, MemOperand(t3, Register::kMantissaOffset)); // mantissa -+ __ sw(a1, MemOperand(t3, Register::kExponentOffset)); // exponent - __ Addu(t3, t3, kDoubleSize); - } - __ Branch(&entry); -@@ -225,8 +225,8 @@ - __ LoadRoot(at, Heap::kTheHoleValueRootIndex); - __ Assert(eq, "object found in smi-only array", at, Operand(t5)); - } -- __ sw(t0, MemOperand(t3)); // mantissa -- __ sw(t1, MemOperand(t3, kIntSize)); // exponent -+ __ sw(t0, MemOperand(t3, Register::kMantissaOffset)); // mantissa -+ __ sw(t1, MemOperand(t3, Register::kExponentOffset)); // exponent - __ Addu(t3, t3, kDoubleSize); - - __ bind(&entry); -@@ -273,7 +273,7 @@ - __ sw(t5, MemOperand(t2, HeapObject::kMapOffset)); - - // Prepare for conversion loop. 
-- __ Addu(t0, t0, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4)); -+ __ Addu(t0, t0, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + Register::kExponentOffset)); - __ Addu(a3, t2, Operand(FixedArray::kHeaderSize)); - __ Addu(t2, t2, Operand(kHeapObjectTag)); - __ sll(t1, t1, 1); -@@ -282,7 +282,7 @@ - __ LoadRoot(t5, Heap::kHeapNumberMapRootIndex); - // Using offsetted addresses. - // a3: begin of destination FixedArray element fields, not tagged -- // t0: begin of source FixedDoubleArray element fields, not tagged, +4 -+ // t0: begin of source FixedDoubleArray element fields, not tagged, points to the exponent - // t1: end of destination FixedArray, not tagged - // t2: destination FixedArray - // t3: the-hole pointer -@@ -296,7 +296,7 @@ - __ Branch(fail); - - __ bind(&loop); -- __ lw(a1, MemOperand(t0)); -+ __ lw(a1, MemOperand(t0, 0)); // exponent - __ Addu(t0, t0, kDoubleSize); - // a1: current element's upper 32 bit - // t0: address of next element's upper 32 bit -@@ -305,7 +305,8 @@ - // Non-hole double, copy value into a heap number. - __ AllocateHeapNumber(a2, a0, t6, t5, &gc_required); - // a2: new heap number -- __ lw(a0, MemOperand(t0, -12)); -+ // Load mantissa of current element, t0 point to exponent of next element. -+ __ lw(a0, MemOperand(t0, (Register::kMantissaOffset - Register::kExponentOffset - kDoubleSize))); - __ sw(a0, FieldMemOperand(a2, HeapNumber::kMantissaOffset)); - __ sw(a1, FieldMemOperand(a2, HeapNumber::kExponentOffset)); - __ mov(a0, a3); ---- a/src/mips/constants-mips.h -+++ b/src/mips/constants-mips.h -@@ -69,6 +69,15 @@ - #endif - - -+#if __BYTE_ORDER == __LITTLE_ENDIAN -+const uint32_t kHoleNanUpper32Offset = 4; -+const uint32_t kHoleNanLower32Offset = 0; -+#else -+const uint32_t kHoleNanUpper32Offset = 0; -+const uint32_t kHoleNanLower32Offset = 4; -+#endif -+ -+ - // Defines constants and accessor classes to assemble, disassemble and - // simulate MIPS32 instructions. - // ---- a/src/mips/lithium-codegen-mips.cc -+++ b/src/mips/lithium-codegen-mips.cc -@@ -2699,7 +2699,7 @@ - } - - if (instr->hydrogen()->RequiresHoleCheck()) { -- __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); -+ __ lw(scratch, MemOperand(elements, kHoleNanUpper32Offset)); - DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32)); - } - -@@ -4869,15 +4869,14 @@ - Handle<FixedDoubleArray>::cast(elements); - for (int i = 0; i < elements_length; i++) { - int64_t value = double_array->get_representation(i); -- // We only support little endian mode... - int32_t value_low = static_cast<int32_t>(value & 0xFFFFFFFF); - int32_t value_high = static_cast<int32_t>(value >> 32); - int total_offset = - elements_offset + FixedDoubleArray::OffsetOfElementAt(i); - __ li(a2, Operand(value_low)); -- __ sw(a2, FieldMemOperand(result, total_offset)); -+ __ sw(a2, FieldMemOperand(result, total_offset + Register::kMantissaOffset)); - __ li(a2, Operand(value_high)); -- __ sw(a2, FieldMemOperand(result, total_offset + 4)); -+ __ sw(a2, FieldMemOperand(result, total_offset + Register::kExponentOffset)); - } - } else if (elements->IsFixedArray()) { - Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements); ---- a/src/mips/macro-assembler-mips.cc -+++ b/src/mips/macro-assembler-mips.cc -@@ -3300,6 +3300,7 @@ - - // TODO(kalmard) check if this can be optimized to use sw in most cases. - // Can't use unaligned access - copy byte by byte.
-+#if __BYTE_ORDER == __LITTLE_ENDIAN - sb(scratch, MemOperand(dst, 0)); - srl(scratch, scratch, 8); - sb(scratch, MemOperand(dst, 1)); -@@ -3307,6 +3308,16 @@ - sb(scratch, MemOperand(dst, 2)); - srl(scratch, scratch, 8); - sb(scratch, MemOperand(dst, 3)); -+#else -+ sb(scratch, MemOperand(dst, 3)); -+ srl(scratch, scratch, 8); -+ sb(scratch, MemOperand(dst, 2)); -+ srl(scratch, scratch, 8); -+ sb(scratch, MemOperand(dst, 1)); -+ srl(scratch, scratch, 8); -+ sb(scratch, MemOperand(dst, 0)); -+#endif -+ - Addu(dst, dst, 4); - - Subu(length, length, Operand(kPointerSize)); -@@ -3412,9 +3423,8 @@ - bind(&have_double_value); - sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize); - Addu(scratch1, scratch1, elements_reg); -- sw(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize)); -- uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32); -- sw(exponent_reg, FieldMemOperand(scratch1, offset)); -+ sw(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize + kHoleNanLower32Offset)); -+ sw(exponent_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize + kHoleNanUpper32Offset)); - jmp(&done); - - bind(&maybe_nan); -@@ -3459,8 +3469,8 @@ - CpuFeatures::Scope scope(FPU); - sdc1(f0, MemOperand(scratch1, 0)); - } else { -- sw(mantissa_reg, MemOperand(scratch1, 0)); -- sw(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes)); -+ sw(mantissa_reg, MemOperand(scratch1, Register::kMantissaOffset)); -+ sw(exponent_reg, MemOperand(scratch1, Register::kExponentOffset)); - } - bind(&done); - } ---- a/src/mips/stub-cache-mips.cc -+++ b/src/mips/stub-cache-mips.cc -@@ -2195,7 +2195,7 @@ - - // Start checking for special cases. - // Get the argument exponent and clear the sign bit. -- __ lw(t1, FieldMemOperand(v0, HeapNumber::kValueOffset + kPointerSize)); -+ __ lw(t1, FieldMemOperand(v0, HeapNumber::kExponentOffset)); - __ And(t2, t1, Operand(~HeapNumber::kSignMask)); - __ srl(t2, t2, HeapNumber::kMantissaBitsInTopWord); - -@@ -3768,8 +3768,8 @@ - __ ldc1(f0, MemOperand(t3, 0)); - } else { - // t3: pointer to the beginning of the double we want to load. -- __ lw(a2, MemOperand(t3, 0)); -- __ lw(a3, MemOperand(t3, Register::kSizeInBytes)); -+ __ lw(a2, MemOperand(t3, Register::kMantissaOffset)); -+ __ lw(a3, MemOperand(t3, Register::kExponentOffset)); - } - break; - case FAST_ELEMENTS: -@@ -4132,8 +4132,8 @@ - CpuFeatures::Scope scope(FPU); - __ sdc1(f0, MemOperand(a3, 0)); - } else { -- __ sw(t2, MemOperand(a3, 0)); -- __ sw(t3, MemOperand(a3, Register::kSizeInBytes)); -+ __ sw(t2, MemOperand(a3, Register::kMantissaOffset)); -+ __ sw(t3, MemOperand(a3, Register::kExponentOffset)); - } - break; - case FAST_ELEMENTS: -@@ -4296,8 +4296,8 @@ - __ sll(t8, key, 2); - __ addu(t8, a3, t8); - // t8: effective address of destination element. -- __ sw(t4, MemOperand(t8, 0)); -- __ sw(t3, MemOperand(t8, Register::kSizeInBytes)); -+ __ sw(t4, MemOperand(t8, Register::kMantissaOffset)); -+ __ sw(t3, MemOperand(t8, Register::kExponentOffset)); - __ mov(v0, a0); - __ Ret(); - } else { -@@ -4497,11 +4497,11 @@ - __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); - __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch)); - -- // Load the upper word of the double in the fixed array and test for NaN. -+ // Load the exponent in the fixed array and test for NaN. 
- __ sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize); - __ Addu(indexed_double_offset, elements_reg, Operand(scratch2)); -- uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32); -- __ lw(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset)); -+ __ lw(scratch, FieldMemOperand(indexed_double_offset, -+ FixedArray::kHeaderSize + kHoleNanUpper32Offset)); - __ Branch(&miss_force_generic, eq, scratch, Operand(kHoleNanUpper32)); - - // Non-NaN. Allocate a new heap number and copy the double value into it. -@@ -4509,12 +4509,12 @@ - __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3, - heap_number_map, &slow_allocate_heapnumber); - -- // Don't need to reload the upper 32 bits of the double, it's already in -+ // Don't need to reload the exponent (the upper 32 bits of the double), it's already in - // scratch. - __ sw(scratch, FieldMemOperand(heap_number_reg, - HeapNumber::kExponentOffset)); - __ lw(scratch, FieldMemOperand(indexed_double_offset, -- FixedArray::kHeaderSize)); -+ FixedArray::kHeaderSize + kHoleNanLower32Offset)); - __ sw(scratch, FieldMemOperand(heap_number_reg, - HeapNumber::kMantissaOffset)); - ---- a/src/objects.h -+++ b/src/objects.h -@@ -1344,8 +1344,13 @@ - // is a mixture of sign, exponent and mantissa. Our current platforms are all - // little endian apart from non-EABI arm which is little endian with big - // endian floating point word ordering! -+#ifndef BIG_ENDIAN_FLOATING_POINT - static const int kMantissaOffset = kValueOffset; - static const int kExponentOffset = kValueOffset + 4; -+#else -+ static const int kMantissaOffset = kValueOffset + 4; -+ static const int kExponentOffset = kValueOffset; -+#endif - - static const int kSize = kValueOffset + kDoubleSize; - static const uint32_t kSignMask = 0x80000000u; ---- a/src/profile-generator.cc -+++ b/src/profile-generator.cc -@@ -1819,7 +1819,9 @@ - Address field = obj->address() + offset; - ASSERT(!Memory::Object_at(field)->IsFailure()); - ASSERT(Memory::Object_at(field)->IsHeapObject()); -- *field |= kFailureTag; -+ Object* untagged = *reinterpret_cast<Object**>(field); -+ intptr_t tagged = reinterpret_cast<intptr_t>(untagged) | kFailureTag; -+ *reinterpret_cast<Object**>(field) = reinterpret_cast<Object*>(tagged); - } - - private: ---- a/src/runtime.cc -+++ b/src/runtime.cc -@@ -8553,8 +8553,15 @@ - #else - typedef uint64_t ObjectPair; - static inline ObjectPair MakePair(MaybeObject* x, MaybeObject* y) { -+#if __BYTE_ORDER == __LITTLE_ENDIAN - return reinterpret_cast<uint32_t>(x) | - (reinterpret_cast<ObjectPair>(y) << 32); -+#elif __BYTE_ORDER == __BIG_ENDIAN -+ return reinterpret_cast<uint32_t>(y) | -+ (reinterpret_cast<ObjectPair>(x) << 32); -+#else -+#error Unknown endianess -+#endif - } - #endif - ---- a/test/cctest/cctest.gyp -+++ b/test/cctest/cctest.gyp -@@ -118,7 +118,7 @@ - 'test-disasm-arm.cc' - ], - }], -- ['v8_target_arch=="mipsel"', { -+ ['v8_target_arch=="mipsel" or v8_target_arch=="mips"', { - 'sources': [ - 'test-assembler-mips.cc', - 'test-disasm-mips.cc', ---- a/test/cctest/test-assembler-mips.cc -+++ b/test/cctest/test-assembler-mips.cc -@@ -537,11 +537,21 @@ - USE(dummy); - - CHECK_EQ(0x11223344, t.r1); -+#if __BYTE_ORDER == __LITTLE_ENDIAN - CHECK_EQ(0x3344, t.r2); - CHECK_EQ(0xffffbbcc, t.r3); - CHECK_EQ(0x0000bbcc, t.r4); - CHECK_EQ(0xffffffcc, t.r5); - CHECK_EQ(0x3333bbcc, t.r6); -+#elif __BYTE_ORDER == __BIG_ENDIAN -+ CHECK_EQ(0x1122, t.r2); -+ CHECK_EQ(0xffff99aa, t.r3); -+ CHECK_EQ(0x000099aa, t.r4); -+ CHECK_EQ(0xffffff99, t.r5); -+ CHECK_EQ(0x99aa3333, t.r6); -+#else -+#error Unknown endianess -+#endif - } - - -@@ -955,6
+965,7 @@ - Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0); - USE(dummy); - -+#if __BYTE_ORDER == __LITTLE_ENDIAN - CHECK_EQ(0x44bbccdd, t.lwl_0); - CHECK_EQ(0x3344ccdd, t.lwl_1); - CHECK_EQ(0x223344dd, t.lwl_2); -@@ -974,6 +985,29 @@ - CHECK_EQ(0xbbccdd44, t.swr_1); - CHECK_EQ(0xccdd3344, t.swr_2); - CHECK_EQ(0xdd223344, t.swr_3); -+#elif __BYTE_ORDER == __BIG_ENDIAN -+ CHECK_EQ(0x11223344, t.lwl_0); -+ CHECK_EQ(0x223344dd, t.lwl_1); -+ CHECK_EQ(0x3344ccdd, t.lwl_2); -+ CHECK_EQ(0x44bbccdd, t.lwl_3); -+ -+ CHECK_EQ(0xaabbcc11, t.lwr_0); -+ CHECK_EQ(0xaabb1122, t.lwr_1); -+ CHECK_EQ(0xaa112233, t.lwr_2); -+ CHECK_EQ(0x11223344, t.lwr_3); -+ -+ CHECK_EQ(0xaabbccdd, t.swl_0); -+ CHECK_EQ(0x11aabbcc, t.swl_1); -+ CHECK_EQ(0x1122aabb, t.swl_2); -+ CHECK_EQ(0x112233aa, t.swl_3); -+ -+ CHECK_EQ(0xdd223344, t.swr_0); -+ CHECK_EQ(0xccdd3344, t.swr_1); -+ CHECK_EQ(0xbbccdd44, t.swr_2); -+ CHECK_EQ(0xaabbccdd, t.swr_3); -+#else -+#error Unknown endianess -+#endif - } - - ---- a/test/mjsunit/mjsunit.status -+++ b/test/mjsunit/mjsunit.status -@@ -49,7 +49,7 @@ - ############################################################################## - # These use a built-in that's only present in debug mode. They take - # too long to run in debug mode on ARM and MIPS. --fuzz-natives-part*: PASS, SKIP if ($mode == release || $arch == arm || $arch == android_arm || $arch == mipsel) -+fuzz-natives-part*: PASS, SKIP if ($mode == release || $arch == arm || $arch == android_arm || $arch == mipsel || $arch == mips) - - big-object-literal: PASS, SKIP if ($arch == arm || $arch == android_arm) - -@@ -57,7 +57,7 @@ - array-constructor: PASS || TIMEOUT - - # Very slow on ARM and MIPS, contains no architecture dependent code. --unicode-case-overoptimization: PASS, TIMEOUT if ($arch == arm || $arch == android_arm || $arch == mipsel) -+unicode-case-overoptimization: PASS, TIMEOUT if ($arch == arm || $arch == android_arm || $arch == mipsel || $arch == mips) - - ############################################################################## - # This test sets the umask on a per-process basis and hence cannot be -@@ -127,7 +127,7 @@ - math-floor-of-div-minus-zero: SKIP - - ############################################################################## --[ $arch == mipsel ] -+[ $arch == mipsel || $arch == mips ] - - # Slow tests which times out in debug mode. 
- try: PASS, SKIP if $mode == debug ---- a/test/mozilla/mozilla.status -+++ b/test/mozilla/mozilla.status -@@ -126,13 +126,13 @@ - ecma/Date/15.9.2.2-6: PASS || FAIL - - # 1026139: These date tests fail on arm and mips --ecma/Date/15.9.5.29-1: PASS || FAIL if ($arch == arm || $arch == mipsel) --ecma/Date/15.9.5.34-1: PASS || FAIL if ($arch == arm || $arch == mipsel) --ecma/Date/15.9.5.28-1: PASS || FAIL if ($arch == arm || $arch == mipsel) -+ecma/Date/15.9.5.29-1: PASS || FAIL if ($arch == arm || $arch == mipsel || $arch == mips) -+ecma/Date/15.9.5.34-1: PASS || FAIL if ($arch == arm || $arch == mipsel || $arch == mips) -+ecma/Date/15.9.5.28-1: PASS || FAIL if ($arch == arm || $arch == mipsel || $arch == mips) - - # 1050186: Arm/MIPS vm is broken; probably unrelated to dates --ecma/Array/15.4.4.5-3: PASS || FAIL if ($arch == arm || $arch == mipsel) --ecma/Date/15.9.5.22-2: PASS || FAIL if ($arch == arm || $arch == mipsel) -+ecma/Array/15.4.4.5-3: PASS || FAIL if ($arch == arm || $arch == mipsel || $arch == mips) -+ecma/Date/15.9.5.22-2: PASS || FAIL if ($arch == arm || $arch == mipsel || $arch == mips) - - # Flaky test that fails due to what appears to be a bug in the test. - # Occurs depending on current time -@@ -854,6 +854,28 @@ - - # Times out and print so much output that we need to skip it to not - # hang the builder. -+js1_5/extensions/regress-342960: SKIP -+ -+# BUG(3251229): Times out when running new crankshaft test script. -+ecma_3/RegExp/regress-311414: SKIP -+ecma/Date/15.9.5.8: SKIP -+ecma/Date/15.9.5.10-2: SKIP -+ecma/Date/15.9.5.11-2: SKIP -+ecma/Date/15.9.5.12-2: SKIP -+js1_5/Array/regress-99120-02: SKIP -+js1_5/extensions/regress-371636: SKIP -+js1_5/Regress/regress-203278-1: SKIP -+js1_5/Regress/regress-404755: SKIP -+js1_5/Regress/regress-451322: SKIP -+ -+ -+# BUG(1040): Allow this test to timeout. -+js1_5/GC/regress-203278-2: PASS || TIMEOUT -+ -+[ $arch == mips ] -+ -+# Times out and print so much output that we need to skip it to not -+# hang the builder. - js1_5/extensions/regress-342960: SKIP - - # BUG(3251229): Times out when running new crankshaft test script. ---- a/test/sputnik/sputnik.status -+++ b/test/sputnik/sputnik.status -@@ -229,3 +229,17 @@ - S15.1.3.4_A2.3_T1: SKIP - S15.1.3.1_A2.5_T1: SKIP - S15.1.3.2_A2.5_T1: SKIP -+ -+[ $arch == mips ] -+ -+# BUG(3251225): Tests that timeout with --nocrankshaft. -+S15.1.3.1_A2.5_T1: SKIP -+S15.1.3.2_A2.5_T1: SKIP -+S15.1.3.1_A2.4_T1: SKIP -+S15.1.3.1_A2.5_T1: SKIP -+S15.1.3.2_A2.4_T1: SKIP -+S15.1.3.2_A2.5_T1: SKIP -+S15.1.3.3_A2.3_T1: SKIP -+S15.1.3.4_A2.3_T1: SKIP -+S15.1.3.1_A2.5_T1: SKIP -+S15.1.3.2_A2.5_T1: SKIP ---- a/test/test262/test262.status -+++ b/test/test262/test262.status -@@ -74,7 +74,7 @@ - S15.1.3.1_A2.5_T1: PASS, SKIP if $mode == debug - S15.1.3.2_A2.5_T1: PASS, SKIP if $mode == debug - --[ $arch == arm || $arch == mipsel ] -+[ $arch == arm || $arch == mipsel || $arch == mips ] - - # TODO(mstarzinger): Causes stack overflow on simulators due to eager - # compilation of parenthesized function literals. Needs investigation. 
---- a/tools/gyp/v8.gyp -+++ b/tools/gyp/v8.gyp -@@ -564,7 +564,7 @@ - '../../src/ia32/stub-cache-ia32.cc', - ], - }], -- ['v8_target_arch=="mipsel"', { -+ ['v8_target_arch=="mipsel" or v8_target_arch=="mips"', { - 'sources': [ - '../../src/mips/assembler-mips.cc', - '../../src/mips/assembler-mips.h', ---- a/tools/run-tests.py -+++ b/tools/run-tests.py -@@ -65,6 +65,7 @@ - "arm", - "ia32", - "mipsel", -+ "mips", - "x64"] - - -@@ -268,7 +269,7 @@ - timeout = options.timeout - if timeout == -1: - # Simulators are slow, therefore allow a longer default timeout. -- if arch in ["android", "arm", "mipsel"]: -+ if arch in ["android", "arm", "mipsel", "mips"]: - timeout = 2 * TIMEOUT_DEFAULT; - else: - timeout = TIMEOUT_DEFAULT; ---- a/tools/test-wrapper-gypbuild.py -+++ b/tools/test-wrapper-gypbuild.py -@@ -151,7 +151,7 @@ - print "Unknown mode %s" % mode - return False - for arch in options.arch: -- if not arch in ['ia32', 'x64', 'arm', 'mipsel', 'android_arm', -+ if not arch in ['ia32', 'x64', 'arm', 'mipsel', 'mips', 'android_arm', - 'android_ia32']: - print "Unknown architecture %s" % arch - return False ---- a/tools/test.py -+++ b/tools/test.py -@@ -1282,7 +1282,7 @@ - options.scons_flags.append("arch=" + options.arch) - # Simulators are slow, therefore allow a longer default timeout. - if options.timeout == -1: -- if options.arch in ['android', 'arm', 'mipsel']: -+ if options.arch in ['android', 'arm', 'mipsel', 'mips']: - options.timeout = 2 * TIMEOUT_DEFAULT; - else: - options.timeout = TIMEOUT_DEFAULT; ---- a/tools/testrunner/local/statusfile.py -+++ b/tools/testrunner/local/statusfile.py -@@ -59,7 +59,7 @@ - # Support arches, modes to be written as keywords instead of strings. - VARIABLES = {ALWAYS: True} - for var in ["debug", "release", "android_arm", "android_ia32", "arm", "ia32", -- "mipsel", "x64"]: -+ "mipsel", "mips", "x64"]: - VARIABLES[var] = var - - diff --git a/0002_mips_r15102_backport.patch b/0002_mips_r15102_backport.patch deleted file mode 100644 index 23014d3..0000000 --- a/0002_mips_r15102_backport.patch +++ /dev/null @@ -1,17 +0,0 @@ -Description: upstream fix needed by mips arch -Origin: https://code.google.com/p/v8/source/detail?r=15102 - ---- a/test/cctest/test-mark-compact.cc -+++ b/test/cctest/test-mark-compact.cc -@@ -545,9 +545,9 @@ - } - } else { - if (v8::internal::Snapshot::IsEnabled()) { -- CHECK_LE(delta, 2500 * 1024); // 2400. -+ CHECK_LE(delta, 2942 * 1024); // 2400. - } else { -- CHECK_LE(delta, 2860 * 1024); // 2760. -+ CHECK_LE(delta, 3400 * 1024); // 2760. - } - } - } diff --git a/0002_mips_r19121_backport.patch b/0002_mips_r19121_backport.patch deleted file mode 100644 index 7d08790..0000000 --- a/0002_mips_r19121_backport.patch +++ /dev/null @@ -1,71 +0,0 @@ -Description: upstream fix needed by mips arch -Origin: https://code.google.com/p/v8/source/detail?r=19121 - ---- a/src/mips/code-stubs-mips.cc -+++ b/src/mips/code-stubs-mips.cc -@@ -7808,9 +7808,16 @@ - const int32_t kReturnAddressDistanceFromFunctionStart = - Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize); - -- // Save live volatile registers. -- __ Push(ra, t1, a1); -- const int32_t kNumSavedRegs = 3; -+ // This should contain all kJSCallerSaved registers. -+ const RegList kSavedRegs = -+ kJSCallerSaved | // Caller saved registers. -+ s5.bit(); // Saved stack pointer. -+ -+ // We also save ra, so the count here is one higher than the mask indicates. 
-+ const int32_t kNumSavedRegs = kNumJSCallerSaved + 2; -+ -+ // Save all caller-save registers as this may be called from anywhere. -+ __ MultiPush(kSavedRegs | ra.bit()); - - // Compute the function's address for the first argument. - __ Subu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart)); -@@ -7822,32 +7829,36 @@ - // Align the stack if necessary. - int frame_alignment = masm->ActivationFrameAlignment(); - if (frame_alignment > kPointerSize) { -- __ mov(t1, sp); - ASSERT(IsPowerOf2(frame_alignment)); -+ __ mov(s5, sp); - __ And(sp, sp, Operand(-frame_alignment)); - } -- -+ // Allocate space for arg slots. -+ __ Subu(sp, sp, kCArgsSlotsSize); - #if defined(V8_HOST_ARCH_MIPS) -- __ li(at, Operand(reinterpret_cast<int32_t>(&entry_hook_))); -- __ lw(at, MemOperand(at)); -+ __ li(t9, Operand(reinterpret_cast<int32_t>(&entry_hook_))); -+ __ lw(t9, MemOperand(t9)); - #else - // Under the simulator we need to indirect the entry hook through a - // trampoline function at a known address. - Address trampoline_address = reinterpret_cast<Address>
( - reinterpret_cast<intptr_t>(EntryHookTrampoline)); - ApiFunction dispatcher(trampoline_address); - __ li(at, Operand(ExternalReference(&dispatcher, -+ __ li(t9, Operand(ExternalReference(&dispatcher, - ExternalReference::BUILTIN_CALL, - masm->isolate()))); - #endif -- __ Call(at); -- -+ // Call C function through t9 to conform ABI for PIC. -+ __ Call(t9); - // Restore the stack pointer if needed. - if (frame_alignment > kPointerSize) { -- __ mov(sp, t1); -+ __ mov(sp, s5); -+ } else { -+ __ Addu(sp, sp, kCArgsSlotsSize); - } - -- __ Pop(ra, t1, a1); -+ // Also pop ra to get Ret(0). -+ __ MultiPop(kSavedRegs | ra.bit()); - __ Ret(); - } - diff --git a/0012_loongson_force_cache_flush.patch b/0012_loongson_force_cache_flush.patch deleted file mode 100644 index 6211e6c..0000000 --- a/0012_loongson_force_cache_flush.patch +++ /dev/null @@ -1,22 +0,0 @@ -Description: Forced whole instruction cache flushing on Loongson. - Workaround for instruction cache flushing malfunction on Loongson systems - that occasionally cause failures under stress test conditions. -Author: Dusan Milosavljevic -Origin:upstream,https://github.com/paul99/v8m-rb/commit/ded6c2c2.patch -Last-Update: 2012-06-13 ---- a/src/mips/cpu-mips.cc -+++ b/src/mips/cpu-mips.cc -@@ -72,6 +72,13 @@ - #else // ANDROID - int res; - // See http://www.linux-mips.org/wiki/Cacheflush_Syscall. -+ if (kArchVariant==kLoongson) { -+ // Force flushing of whole instruction cache on Loongson. This is a -+ // workaround for problem when under stress tests cache lines are not -+ // flushed through syscall for some reasons. -+ size_t iCacheSize = 64 * KB; -+ size = iCacheSize + 1; -+ } - res = syscall(__NR_cacheflush, start, size, ICACHE); - if (res) { - V8_Fatal(__FILE__, __LINE__, "Failed to flush the instruction cache"); diff --git a/dead.package b/dead.package new file mode 100644 index 0000000..028c167 --- /dev/null +++ b/dead.package @@ -0,0 +1 @@ +old, broken, effectively useless diff --git a/sources b/sources deleted file mode 100644 index 0ed3ff1..0000000 --- a/sources +++ /dev/null @@ -1 +0,0 @@ -d4e3a038ad387fd7b75d40a89e231ef7 v8-3.14.5.10.tar.bz2 diff --git a/v8-3.14.5.10-CVE-2013-2882.patch b/v8-3.14.5.10-CVE-2013-2882.patch deleted file mode 100644 index b14dbb5..0000000 --- a/v8-3.14.5.10-CVE-2013-2882.patch +++ /dev/null @@ -1,55 +0,0 @@ -From 18e43f925d5d502b7531f40e4a1becba56089303 Mon Sep 17 00:00:00 2001 -From: "mstarzinger@chromium.org" -Date: Mon, 15 Jul 2013 11:41:41 +0000 -Subject: [PATCH] Use internal array as API function cache.
-R=yangguo@chromium.org -BUG=chromium:260106 -TEST=cctest/test-api/Regress260106 - -Review URL: https://codereview.chromium.org/19159003 - -git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@15665 ce2b1a6d-e550-0410-aec6-3dcde31c8c00 ---- - src/apinatives.js | 2 +- - test/cctest/test-api.cc | 11 +++++++++++ - 2 files changed, 12 insertions(+), 1 deletion(-) - -diff --git a/src/apinatives.js b/src/apinatives.js -index 79b41dd..adefab6 100644 ---- a/src/apinatives.js -+++ b/src/apinatives.js -@@ -37,7 +37,7 @@ function CreateDate(time) { - } - - --var kApiFunctionCache = {}; -+var kApiFunctionCache = new InternalArray(); - var functionCache = kApiFunctionCache; - - -diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc -index 728a8f7..bcd28bd 100644 ---- a/test/cctest/test-api.cc -+++ b/test/cctest/test-api.cc -@@ -17707,6 +17707,17 @@ THREADED_TEST(Regress157124) { - } - - -+THREADED_TEST(Regress260106) { -+ LocalContext context; -+ v8::HandleScope scope(context->GetIsolate()); -+ Local<FunctionTemplate> templ = FunctionTemplate::New(DummyCallHandler); -+ CompileRun("for (var i = 0; i < 128; i++) Object.prototype[i] = 0;"); -+ Local<Function> function = templ->GetFunction(); -+ CHECK(!function.IsEmpty()); -+ CHECK(function->IsFunction()); -+} -+ -+ - #ifndef WIN32 - class ThreadInterruptTest { - public: --- -1.8.3.1 - diff --git a/v8-3.14.5.10-CVE-2013-6640.patch b/v8-3.14.5.10-CVE-2013-6640.patch deleted file mode 100644 index 1a2a420..0000000 --- a/v8-3.14.5.10-CVE-2013-6640.patch +++ /dev/null @@ -1,316 +0,0 @@ -From 520f94a1da96b0f1fd3d0e0c0b82e4d7c1e7d58f Mon Sep 17 00:00:00 2001 -From: "T.C. Hollingsworth" -Date: Fri, 13 Dec 2013 00:45:27 -0700 -Subject: [PATCH] Limit size of dehoistable array indices - -backported from upstream r17801 ---- - src/elements-kind.cc | 30 ++++++++++++++++ - src/elements-kind.h | 2 ++ - src/hydrogen-instructions.h | 9 +++++ - src/hydrogen.cc | 2 +- - src/lithium.cc | 30 ---------------- - src/lithium.h | 3 -- - test/mjsunit/regress/regress-crbug-319835.js | 51 ++++++++++++++++++++++++++++ - test/mjsunit/regress/regress-crbug-319860.js | 47 +++++++++++++++++++++++++ - 8 files changed, 140 insertions(+), 34 deletions(-) - create mode 100644 test/mjsunit/regress/regress-crbug-319835.js - create mode 100644 test/mjsunit/regress/regress-crbug-319860.js - -diff --git a/src/elements-kind.cc b/src/elements-kind.cc -index 655a23b..c93a602 100644 ---- a/src/elements-kind.cc -+++ b/src/elements-kind.cc -@@ -35,6 +35,36 @@ namespace v8 { - namespace internal { - - -+int ElementsKindToShiftSize(ElementsKind elements_kind) { -+ switch (elements_kind) { -+ case EXTERNAL_BYTE_ELEMENTS: -+ case EXTERNAL_PIXEL_ELEMENTS: -+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: -+ return 0; -+ case EXTERNAL_SHORT_ELEMENTS: -+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: -+ return 1; -+ case EXTERNAL_INT_ELEMENTS: -+ case EXTERNAL_UNSIGNED_INT_ELEMENTS: -+ case EXTERNAL_FLOAT_ELEMENTS: -+ return 2; -+ case EXTERNAL_DOUBLE_ELEMENTS: -+ case FAST_DOUBLE_ELEMENTS: -+ case FAST_HOLEY_DOUBLE_ELEMENTS: -+ return 3; -+ case FAST_SMI_ELEMENTS: -+ case FAST_ELEMENTS: -+ case FAST_HOLEY_SMI_ELEMENTS: -+ case FAST_HOLEY_ELEMENTS: -+ case DICTIONARY_ELEMENTS: -+ case NON_STRICT_ARGUMENTS_ELEMENTS: -+ return kPointerSizeLog2; -+ } -+ UNREACHABLE(); -+ return 0; -+} -+ -+ - void PrintElementsKind(FILE* out, ElementsKind kind) { - ElementsAccessor* accessor = ElementsAccessor::ForKind(kind); - PrintF(out, "%s", accessor->name()); -diff --git a/src/elements-kind.h b/src/elements-kind.h -index 3be7711..c5d9df8 100644
---- a/src/elements-kind.h -+++ b/src/elements-kind.h -@@ -77,6 +77,8 @@ const int kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1; - const int kFastElementsKindCount = LAST_FAST_ELEMENTS_KIND - - FIRST_FAST_ELEMENTS_KIND + 1; - -+int ElementsKindToShiftSize(ElementsKind elements_kind); -+ - void PrintElementsKind(FILE* out, ElementsKind kind); - - ElementsKind GetInitialFastElementsKind(); -diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h -index 015212d..a1e5b97 100644 ---- a/src/hydrogen-instructions.h -+++ b/src/hydrogen-instructions.h -@@ -4240,6 +4240,7 @@ class ArrayInstructionInterface { - virtual HValue* GetKey() = 0; - virtual void SetKey(HValue* key) = 0; - virtual void SetIndexOffset(uint32_t index_offset) = 0; -+ virtual int MaxIndexOffsetBits() = 0; - virtual bool IsDehoisted() = 0; - virtual void SetDehoisted(bool is_dehoisted) = 0; - virtual ~ArrayInstructionInterface() { }; -@@ -4274,6 +4275,7 @@ class HLoadKeyedFastElement - void SetIndexOffset(uint32_t index_offset) { - bit_field_ = IndexOffsetField::update(bit_field_, index_offset); - } -+ int MaxIndexOffsetBits() { return 25; } - HValue* GetKey() { return key(); } - void SetKey(HValue* key) { SetOperandAt(1, key); } - bool IsDehoisted() { return IsDehoistedField::decode(bit_field_); } -@@ -4343,6 +4345,7 @@ class HLoadKeyedFastDoubleElement - HValue* dependency() { return OperandAt(2); } - uint32_t index_offset() { return index_offset_; } - void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; } -+ int MaxIndexOffsetBits() { return 25; } - HValue* GetKey() { return key(); } - void SetKey(HValue* key) { SetOperandAt(1, key); } - bool IsDehoisted() { return is_dehoisted_; } -@@ -4420,6 +4423,7 @@ class HLoadKeyedSpecializedArrayElement - ElementsKind elements_kind() const { return elements_kind_; } - uint32_t index_offset() { return index_offset_; } - void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; } -+ int MaxIndexOffsetBits() { return 25; } - HValue* GetKey() { return key(); } - void SetKey(HValue* key) { SetOperandAt(1, key); } - bool IsDehoisted() { return is_dehoisted_; } -@@ -4595,6 +4599,7 @@ class HStoreKeyedFastElement - } - uint32_t index_offset() { return index_offset_; } - void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; } -+ int MaxIndexOffsetBits() { return 25; } - HValue* GetKey() { return key(); } - void SetKey(HValue* key) { SetOperandAt(1, key); } - bool IsDehoisted() { return is_dehoisted_; } -@@ -4648,6 +4653,7 @@ class HStoreKeyedFastDoubleElement - HValue* value() { return OperandAt(2); } - uint32_t index_offset() { return index_offset_; } - void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; } -+ int MaxIndexOffsetBits() { return 25; } - HValue* GetKey() { return key(); } - void SetKey(HValue* key) { SetOperandAt(1, key); } - bool IsDehoisted() { return is_dehoisted_; } -@@ -4706,6 +4712,9 @@ class HStoreKeyedSpecializedArrayElement - ElementsKind elements_kind() const { return elements_kind_; } - uint32_t index_offset() { return index_offset_; } - void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; } -+ int MaxIndexOffsetBits() { -+ return 31 - ElementsKindToShiftSize(elements_kind_); -+ } - HValue* GetKey() { return key(); } - void SetKey(HValue* key) { SetOperandAt(1, key); } - bool IsDehoisted() { return is_dehoisted_; } -diff --git a/src/hydrogen.cc b/src/hydrogen.cc -index 8393e51..e3f79ee 100644 ---- a/src/hydrogen.cc -+++ 
b/src/hydrogen.cc -@@ -3737,7 +3737,7 @@ static void DehoistArrayIndex(ArrayInstructionInterface* array_operation) { - int32_t value = constant->Integer32Value() * sign; - // We limit offset values to 30 bits because we want to avoid the risk of - // overflows when the offset is added to the object header size. -- if (value >= 1 << 30 || value < 0) return; -+ if (value >= 1 << array_operation->MaxIndexOffsetBits() || value < 0) return; - array_operation->SetKey(subexpression); - if (index->HasNoUses()) { - index->DeleteAndReplaceWith(NULL); -diff --git a/src/lithium.cc b/src/lithium.cc -index eb2198d..1232bf1 100644 ---- a/src/lithium.cc -+++ b/src/lithium.cc -@@ -227,36 +227,6 @@ void LPointerMap::PrintTo(StringStream* stream) { - } - - --int ElementsKindToShiftSize(ElementsKind elements_kind) { -- switch (elements_kind) { -- case EXTERNAL_BYTE_ELEMENTS: -- case EXTERNAL_PIXEL_ELEMENTS: -- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: -- return 0; -- case EXTERNAL_SHORT_ELEMENTS: -- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: -- return 1; -- case EXTERNAL_INT_ELEMENTS: -- case EXTERNAL_UNSIGNED_INT_ELEMENTS: -- case EXTERNAL_FLOAT_ELEMENTS: -- return 2; -- case EXTERNAL_DOUBLE_ELEMENTS: -- case FAST_DOUBLE_ELEMENTS: -- case FAST_HOLEY_DOUBLE_ELEMENTS: -- return 3; -- case FAST_SMI_ELEMENTS: -- case FAST_ELEMENTS: -- case FAST_HOLEY_SMI_ELEMENTS: -- case FAST_HOLEY_ELEMENTS: -- case DICTIONARY_ELEMENTS: -- case NON_STRICT_ARGUMENTS_ELEMENTS: -- return kPointerSizeLog2; -- } -- UNREACHABLE(); -- return 0; --} -- -- - LLabel* LChunk::GetLabel(int block_id) const { - HBasicBlock* block = graph_->blocks()->at(block_id); - int first_instruction = block->first_instruction_index(); -diff --git a/src/lithium.h b/src/lithium.h -index 089926e..a29d9d0 100644 ---- a/src/lithium.h -+++ b/src/lithium.h -@@ -704,9 +704,6 @@ class LChunk: public ZoneObject { - }; - - --int ElementsKindToShiftSize(ElementsKind elements_kind); -- -- - } } // namespace v8::internal - - #endif // V8_LITHIUM_H_ -diff --git a/test/mjsunit/regress/regress-crbug-319835.js b/test/mjsunit/regress/regress-crbug-319835.js -new file mode 100644 -index 0000000..48f871f ---- /dev/null -+++ b/test/mjsunit/regress/regress-crbug-319835.js -@@ -0,0 +1,51 @@ -+// Copyright 2013 the V8 project authors. All rights reserved. -+// Redistribution and use in source and binary forms, with or without -+// modification, are permitted provided that the following conditions are -+// met: -+// -+// * Redistributions of source code must retain the above copyright -+// notice, this list of conditions and the following disclaimer. -+// * Redistributions in binary form must reproduce the above -+// copyright notice, this list of conditions and the following -+// disclaimer in the documentation and/or other materials provided -+// with the distribution. -+// * Neither the name of Google Inc. nor the names of its -+// contributors may be used to endorse or promote products derived -+// from this software without specific prior written permission. -+// -+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ -+// Flags: --allow-natives-syntax -+ -+try {} catch(e) {} // No need to optimize the top level. -+ -+var size = 0x20000; -+var a = new Float64Array(size); -+var training = new Float64Array(10); -+function store(a, index) { -+ var offset = 0x20000000; -+ for (var i = 0; i < 1; i++) { -+ a[index + offset] = 0xcc; -+ } -+} -+ -+store(training, -0x20000000); -+store(training, -0x20000000 + 1); -+store(training, -0x20000000); -+store(training, -0x20000000 + 1); -+%OptimizeFunctionOnNextCall(store); -+ -+// Segfault maybe? -+for (var i = -0x20000000; i < -0x20000000 + size; i++) { -+ store(a, i); -+} -diff --git a/test/mjsunit/regress/regress-crbug-319860.js b/test/mjsunit/regress/regress-crbug-319860.js -new file mode 100644 -index 0000000..b81fb85 ---- /dev/null -+++ b/test/mjsunit/regress/regress-crbug-319860.js -@@ -0,0 +1,47 @@ -+// Copyright 2013 the V8 project authors. All rights reserved. -+// Redistribution and use in source and binary forms, with or without -+// modification, are permitted provided that the following conditions are -+// met: -+// -+// * Redistributions of source code must retain the above copyright -+// notice, this list of conditions and the following disclaimer. -+// * Redistributions in binary form must reproduce the above -+// copyright notice, this list of conditions and the following -+// disclaimer in the documentation and/or other materials provided -+// with the distribution. -+// * Neither the name of Google Inc. nor the names of its -+// contributors may be used to endorse or promote products derived -+// from this software without specific prior written permission. -+// -+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ -+// Flags: --allow-natives-syntax -+ -+function read(a, index) { -+ var offset = 0x2000000; -+ var result; -+ for (var i = 0; i < 1; i++) { -+ result = a[index + offset]; -+ } -+ return result; -+} -+ -+var a = new Int8Array(0x2000001); -+read(a, 0); -+read(a, 0); -+%OptimizeFunctionOnNextCall(read); -+ -+// Segfault maybe? 
-+for (var i = 0; i > -1000000; --i) { -+ read(a, i); -+} --- -1.8.4.2 - diff --git a/v8-3.14.5.10-CVE-2013-6650.patch b/v8-3.14.5.10-CVE-2013-6650.patch deleted file mode 100644 index d44811f..0000000 --- a/v8-3.14.5.10-CVE-2013-6650.patch +++ /dev/null @@ -1,80 +0,0 @@ -From 3928813f014d3cdaed83fefc3a454078272f114b Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Hr=C4=8Dka?= -Date: Tue, 18 Feb 2014 00:23:04 +0100 -Subject: [PATCH] Backport Fix for CVE-2013-6650 Original patch - https://code.google.com/p/v8/source/detail?r=18483 - -Resolve: rhbz#1059070 ---- - src/store-buffer.cc | 2 +- - test/mjsunit/regress/regress-331444.js | 45 ++++++++++++++++++++++++++++++++++ - 2 files changed, 46 insertions(+), 1 deletion(-) - create mode 100644 test/mjsunit/regress/regress-331444.js - -diff --git a/src/store-buffer.cc b/src/store-buffer.cc -index 66488ae..b9055f8 100644 ---- a/src/store-buffer.cc -+++ b/src/store-buffer.cc -@@ -242,7 +242,7 @@ void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) { - containing_chunk = MemoryChunk::FromAnyPointerAddress(addr); - } - int old_counter = containing_chunk->store_buffer_counter(); -- if (old_counter == threshold) { -+ if (old_counter >= threshold) { - containing_chunk->set_scan_on_scavenge(true); - created_new_scan_on_scavenge_pages = true; - } -diff --git a/test/mjsunit/regress/regress-331444.js b/test/mjsunit/regress/regress-331444.js -new file mode 100644 -index 0000000..3df0a08 ---- /dev/null -+++ b/test/mjsunit/regress/regress-331444.js -@@ -0,0 +1,45 @@ -+// Copyright 2014 the V8 project authors. All rights reserved. -+// Redistribution and use in source and binary forms, with or without -+// modification, are permitted provided that the following conditions are -+// met: -+// -+// * Redistributions of source code must retain the above copyright -+// notice, this list of conditions and the following disclaimer. -+// * Redistributions in binary form must reproduce the above -+// copyright notice, this list of conditions and the following -+// disclaimer in the documentation and/or other materials provided -+// with the distribution. -+// * Neither the name of Google Inc. nor the names of its -+// contributors may be used to endorse or promote products derived -+// from this software without specific prior written permission. -+// -+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ -+// Flags: --expose-gc -+ -+ -+function boom() { -+ var args = []; -+ for (var i = 0; i < 125000; i++) -+ args.push(i); -+ return Array.apply(Array, args); -+} -+var array = boom(); -+function fib(n) { -+ var f0 = 0, f1 = 1; -+ for (; n > 0; n = n - 1) { -+ f0 + f1; -+ f0 = array; -+ } -+} -+fib(12); --- -1.8.3.1 - diff --git a/v8-3.14.5.10-CVE-2013-6668-segfault.patch b/v8-3.14.5.10-CVE-2013-6668-segfault.patch deleted file mode 100644 index 8e3d600..0000000 --- a/v8-3.14.5.10-CVE-2013-6668-segfault.patch +++ /dev/null @@ -1,60 +0,0 @@ -From 3122e0eae64c5ab494b29d0a9cadef902d93f1f9 Mon Sep 17 00:00:00 2001 -From: Fedor Indutny -Date: Fri, 22 Aug 2014 03:59:35 +0400 -Subject: [PATCH] deps: fix up v8 after fd80a3 - -fd80a31e0697d6317ce8c2d289575399f4e06d21 has introduced a segfault -during redundant boundary check elimination (#8208). - -The problem consists of two parts: - - 1. Abscense of instruction iterator in - `EliminateRedundantBoundsChecks`. It was present in recent v8, but - wasn't considered important at the time of backport. However, since - the function is changing instructions order in block, it is - important to not rely at `i->next()` at the end of the loop. - 2. Too strict ASSERT in `MoveIndexIfNecessary`. It is essentially a - backport of a45c96ab from v8's upstream. See - https://github.com/v8/v8/commit/a45c96ab for details. - -fix #8208 ---- - src/hydrogen.cc | 11 ++++++++--- - 1 file changed, 8 insertions(+), 3 deletions(-) - -diff --git a/src/hydrogen.cc b/src/hydrogen.cc -index 50d8e49..18a6b60 100644 ---- a/src/hydrogen.cc -+++ b/src/hydrogen.cc -@@ -3546,7 +3546,11 @@ class BoundsCheckBbData: public ZoneObject { - void MoveIndexIfNecessary(HValue* index_raw, - HBoundsCheck* insert_before, - HInstruction* end_of_scan_range) { -- ASSERT(index_raw->IsAdd() || index_raw->IsSub()); -+ if (!index_raw->IsAdd() && !index_raw->IsSub()) { -+ // index_raw can be HAdd(index_base, offset), HSub(index_base, offset), -+ // or index_base directly. In the latter case, no need to move anything. -+ return; -+ } - HBinaryOperation* index = - HArithmeticBinaryOperation::cast(index_raw); - HValue* left_input = index->left(); -@@ -3581,7 +3585,6 @@ class BoundsCheckBbData: public ZoneObject { - HBoundsCheck* tighter_check) { - ASSERT(original_check->length() == tighter_check->length()); - MoveIndexIfNecessary(tighter_check->index(), original_check, tighter_check); -- original_check->ReplaceAllUsesWith(original_check->index()); - original_check->SetOperandAt(0, tighter_check->index()); - } - }; -@@ -3624,7 +3627,9 @@ void HGraph::EliminateRedundantBoundsChecks(HBasicBlock* bb, - BoundsCheckTable* table) { - BoundsCheckBbData* bb_data_list = NULL; - -- for (HInstruction* i = bb->first(); i != NULL; i = i->next()) { -+ HInstruction* next; -+ for (HInstruction* i = bb->first(); i != NULL; i = next) { -+ next = i->next(); - if (!i->IsBoundsCheck()) continue; - - HBoundsCheck* check = HBoundsCheck::cast(i); diff --git a/v8-3.14.5.10-CVE-2013-6668.patch b/v8-3.14.5.10-CVE-2013-6668.patch deleted file mode 100644 index fe5cb6d..0000000 --- a/v8-3.14.5.10-CVE-2013-6668.patch +++ /dev/null @@ -1,186 +0,0 @@ -From fd80a31e0697d6317ce8c2d289575399f4e06d21 Mon Sep 17 00:00:00 2001 -From: Fedor Indutny -Date: Thu, 14 Aug 2014 19:29:28 +0400 -Subject: [PATCH] deps: backport 5f836c from v8 upstream - -Original commit message: - - Fix Hydrogen bounds check elimination - - When combining bounds checks, they must all be moved before the first load/store - that they are guarding. 
- - BUG=chromium:344186 - LOG=y - R=svenpanne@chromium.org - - Review URL: https://codereview.chromium.org/172093002 - - git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@19475 ce2b1a6d-e550-0410-aec6-3dcde31c8c00 - -fix #8070 ---- - src/hydrogen.cc | 106 +++++++++++++++++++++++------------------------- - 1 file changed, 50 insertions(+), 56 deletions(-) - -diff --git a/src/hydrogen.cc b/src/hydrogen.cc -index e3f79ee..50d8e49 100644 ---- a/src/hydrogen.cc -+++ b/src/hydrogen.cc -@@ -3487,13 +3487,7 @@ class BoundsCheckBbData: public ZoneObject { - keep_new_check = true; - upper_check_ = new_check; - } else { -- BuildOffsetAdd(upper_check_, -- &added_upper_index_, -- &added_upper_offset_, -- Key()->IndexBase(), -- new_check->index()->representation(), -- new_offset); -- upper_check_->SetOperandAt(0, added_upper_index_); -+ TightenCheck(upper_check_, new_check); - } - } else if (new_offset < lower_offset_) { - lower_offset_ = new_offset; -@@ -3501,28 +3495,27 @@ class BoundsCheckBbData: public ZoneObject { - keep_new_check = true; - lower_check_ = new_check; - } else { -- BuildOffsetAdd(lower_check_, -- &added_lower_index_, -- &added_lower_offset_, -- Key()->IndexBase(), -- new_check->index()->representation(), -- new_offset); -- lower_check_->SetOperandAt(0, added_lower_index_); -+ TightenCheck(lower_check_, new_check); - } - } else { -- ASSERT(false); -+ // Should never have called CoverCheck() in this case. -+ UNREACHABLE(); - } - - if (!keep_new_check) { - new_check->DeleteAndReplaceWith(NULL); -+ } else { -+ HBoundsCheck* first_check = new_check == lower_check_ ? upper_check_ -+ : lower_check_; -+ // The length is guaranteed to be live at first_check. -+ ASSERT(new_check->length() == first_check->length()); -+ HInstruction* old_position = new_check->next(); -+ new_check->Unlink(); -+ new_check->InsertAfter(first_check); -+ MoveIndexIfNecessary(new_check->index(), new_check, old_position); - } - } - -- void RemoveZeroOperations() { -- RemoveZeroAdd(&added_lower_index_, &added_lower_offset_); -- RemoveZeroAdd(&added_upper_index_, &added_upper_offset_); -- } -- - BoundsCheckBbData(BoundsCheckKey* key, - int32_t lower_offset, - int32_t upper_offset, -@@ -3537,10 +3530,6 @@ class BoundsCheckBbData: public ZoneObject { - basic_block_(bb), - lower_check_(lower_check), - upper_check_(upper_check), -- added_lower_index_(NULL), -- added_lower_offset_(NULL), -- added_upper_index_(NULL), -- added_upper_offset_(NULL), - next_in_bb_(next_in_bb), - father_in_dt_(father_in_dt) { } - -@@ -3551,44 +3540,50 @@ class BoundsCheckBbData: public ZoneObject { - HBasicBlock* basic_block_; - HBoundsCheck* lower_check_; - HBoundsCheck* upper_check_; -- HAdd* added_lower_index_; -- HConstant* added_lower_offset_; -- HAdd* added_upper_index_; -- HConstant* added_upper_offset_; - BoundsCheckBbData* next_in_bb_; - BoundsCheckBbData* father_in_dt_; - -- void BuildOffsetAdd(HBoundsCheck* check, -- HAdd** add, -- HConstant** constant, -- HValue* original_value, -- Representation representation, -- int32_t new_offset) { -- HConstant* new_constant = new(BasicBlock()->zone()) -- HConstant(new_offset, Representation::Integer32()); -- if (*add == NULL) { -- new_constant->InsertBefore(check); -- // Because of the bounds checks elimination algorithm, the index is always -- // an HAdd or an HSub here, so we can safely cast to an HBinaryOperation. 
-- HValue* context = HBinaryOperation::cast(check->index())->context(); -- *add = new(BasicBlock()->zone()) HAdd(context, -- original_value, -- new_constant); -- (*add)->AssumeRepresentation(representation); -- (*add)->InsertBefore(check); -- } else { -- new_constant->InsertBefore(*add); -- (*constant)->DeleteAndReplaceWith(new_constant); -+ void MoveIndexIfNecessary(HValue* index_raw, -+ HBoundsCheck* insert_before, -+ HInstruction* end_of_scan_range) { -+ ASSERT(index_raw->IsAdd() || index_raw->IsSub()); -+ HBinaryOperation* index = -+ HArithmeticBinaryOperation::cast(index_raw); -+ HValue* left_input = index->left(); -+ HValue* right_input = index->right(); -+ bool must_move_index = false; -+ bool must_move_left_input = false; -+ bool must_move_right_input = false; -+ for (HInstruction* cursor = end_of_scan_range; cursor != insert_before;) { -+ if (cursor == left_input) must_move_left_input = true; -+ if (cursor == right_input) must_move_right_input = true; -+ if (cursor == index) must_move_index = true; -+ if (cursor->previous() == NULL) { -+ cursor = cursor->block()->dominator()->end(); -+ } else { -+ cursor = cursor->previous(); -+ } - } -- *constant = new_constant; -- } - -- void RemoveZeroAdd(HAdd** add, HConstant** constant) { -- if (*add != NULL && (*constant)->Integer32Value() == 0) { -- (*add)->DeleteAndReplaceWith((*add)->left()); -- (*constant)->DeleteAndReplaceWith(NULL); -+ // The BCE algorithm only selects mergeable bounds checks that share -+ // the same "index_base", so we'll only ever have to move constants. -+ if (must_move_left_input) { -+ HConstant::cast(left_input)->Unlink(); -+ HConstant::cast(left_input)->InsertBefore(index); -+ } -+ if (must_move_right_input) { -+ HConstant::cast(right_input)->Unlink(); -+ HConstant::cast(right_input)->InsertBefore(index); - } - } -+ -+ void TightenCheck(HBoundsCheck* original_check, -+ HBoundsCheck* tighter_check) { -+ ASSERT(original_check->length() == tighter_check->length()); -+ MoveIndexIfNecessary(tighter_check->index(), original_check, tighter_check); -+ original_check->ReplaceAllUsesWith(original_check->index()); -+ original_check->SetOperandAt(0, tighter_check->index()); -+ } - }; - - -@@ -3683,7 +3678,6 @@ void HGraph::EliminateRedundantBoundsChecks(HBasicBlock* bb, - for (BoundsCheckBbData* data = bb_data_list; - data != NULL; - data = data->NextInBasicBlock()) { -- data->RemoveZeroOperations(); - if (data->FatherInDominatorTree()) { - table->Insert(data->Key(), data->FatherInDominatorTree(), zone()); - } else { diff --git a/v8-3.14.5.10-CVE-2014-1704-1.patch b/v8-3.14.5.10-CVE-2014-1704-1.patch deleted file mode 100644 index 2df5230..0000000 --- a/v8-3.14.5.10-CVE-2014-1704-1.patch +++ /dev/null @@ -1,77 +0,0 @@ -From bf973073d98660edf35e01e6984029e46eb85368 Mon Sep 17 00:00:00 2001 -From: "dslomov@chromium.org" - -Date: Mon, 13 Jan 2014 13:00:09 +0000 -Subject: [PATCH] Use unsigned integer arithmetic in Zone::NewExpand. - - BUG=328202 - R=jkummerow@chromium.org - LOG=N - - Review URL: https://codereview.chromium.org/108783005 - - git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@18564 ce2b1a6d-e550-0410-aec6-3dcde31c8c00 ---- - src/zone.cc | 29 +++++++++++++++++++---------- - 1 file changed, 19 insertions(+), 10 deletions(-) - -diff --git a/src/zone.cc b/src/zone.cc -index 51b8113..c12978f 100644 ---- a/src/zone.cc -+++ b/src/zone.cc -@@ -175,25 +175,31 @@ Address Zone::NewExpand(int size) { - // except that we employ a maximum segment size when we delete. 
This - // is to avoid excessive malloc() and free() overhead. - Segment* head = segment_head_; -- int old_size = (head == NULL) ? 0 : head->size(); -- static const int kSegmentOverhead = sizeof(Segment) + kAlignment; -- int new_size_no_overhead = size + (old_size << 1); -- int new_size = kSegmentOverhead + new_size_no_overhead; -+ const size_t old_size = (head == NULL) ? 0 : head->size(); -+ static const size_t kSegmentOverhead = sizeof(Segment) + kAlignment; -+ const size_t new_size_no_overhead = size + (old_size << 1); -+ size_t new_size = kSegmentOverhead + new_size_no_overhead; -+ const size_t min_new_size = kSegmentOverhead + static_cast(size); - // Guard against integer overflow. -- if (new_size_no_overhead < size || new_size < kSegmentOverhead) { -+ if (new_size_no_overhead < static_cast(size) || -+ new_size < static_cast(kSegmentOverhead)) { - V8::FatalProcessOutOfMemory("Zone"); - return NULL; - } -- if (new_size < kMinimumSegmentSize) { -+ if (new_size < static_cast(kMinimumSegmentSize)) { - new_size = kMinimumSegmentSize; -- } else if (new_size > kMaximumSegmentSize) { -+ } else if (new_size > static_cast(kMaximumSegmentSize)) { - // Limit the size of new segments to avoid growing the segment size - // exponentially, thus putting pressure on contiguous virtual address space. - // All the while making sure to allocate a segment large enough to hold the - // requested size. -- new_size = Max(kSegmentOverhead + size, kMaximumSegmentSize); -+ new_size = Max(min_new_size, static_cast(kMaximumSegmentSize)); - } -- Segment* segment = NewSegment(new_size); -+ if (new_size > INT_MAX) { -+ V8::FatalProcessOutOfMemory("Zone"); -+ return NULL; -+ } -+ Segment* segment = NewSegment(static_cast(new_size)); - if (segment == NULL) { - V8::FatalProcessOutOfMemory("Zone"); - return NULL; -@@ -203,7 +209,10 @@ Address Zone::NewExpand(int size) { - Address result = RoundUp(segment->start(), kAlignment); - position_ = result + size; - // Check for address overflow. -- if (position_ < result) { -+ // (Should not happen since the segment is guaranteed to accomodate -+ // size bytes + header and alignment padding) -+ if (reinterpret_cast(position_) -+ < reinterpret_cast(result)) { - V8::FatalProcessOutOfMemory("Zone"); - return NULL; - } --- -1.8.5.3 - diff --git a/v8-3.14.5.10-CVE-2016-1669.patch b/v8-3.14.5.10-CVE-2016-1669.patch deleted file mode 100644 index 1a37129..0000000 --- a/v8-3.14.5.10-CVE-2016-1669.patch +++ /dev/null @@ -1,30 +0,0 @@ -diff -up v8-3.14.5.10/src/zone.cc.3a9bfec v8-3.14.5.10/src/zone.cc ---- v8-3.14.5.10/src/zone.cc.3a9bfec 2016-07-06 11:21:45.362891427 -0400 -+++ v8-3.14.5.10/src/zone.cc 2016-07-06 11:22:08.538764825 -0400 -@@ -168,7 +168,10 @@ Address Zone::NewExpand(int size) { - // Make sure the requested size is already properly aligned and that - // there isn't enough room in the Zone to satisfy the request. - ASSERT(size == RoundDown(size, kAlignment)); -- ASSERT(size > limit_ - position_); -+ ASSERT(limit_ < position_ || -+ reinterpret_cast(limit_) - -+ reinterpret_cast(position_) < -+ size); - - // Compute the new segment size. We use a 'high water mark' - // strategy, where we increase the segment size every time we expand -diff -up v8-3.14.5.10/src/zone-inl.h.3a9bfec v8-3.14.5.10/src/zone-inl.h ---- v8-3.14.5.10/src/zone-inl.h.3a9bfec 2016-07-06 11:21:00.075136898 -0400 -+++ v8-3.14.5.10/src/zone-inl.h 2016-07-06 11:21:31.546966899 -0400 -@@ -55,7 +55,10 @@ inline void* Zone::New(int size) { - // Check if the requested size is available without expanding. 
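
Both zone patches here guard the same class of bug: size arithmetic done in signed int can wrap to a small or negative value, after which the zone hands out a segment smaller than the request. A standalone model of the patched checks (kOverhead and the segment bounds are invented stand-ins; 0 is returned where the real code calls V8::FatalProcessOutOfMemory):

    #include <climits>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    static const size_t kOverhead = 32;            // sizeof(Segment) + align
    static const size_t kMinSegment = 8 * 1024;
    static const size_t kMaxSegment = 1024 * 1024;

    // Fixed form of the Zone::New room check: reject when position has run
    // past limit, then compare against the now well-defined remaining space.
    bool HasRoom(uintptr_t position, uintptr_t limit, int size) {
      return limit >= position &&
             static_cast<size_t>(size) <= limit - position;
    }

    // Overflow-safe segment growth, mirroring the shape of the patched
    // Zone::NewExpand: all arithmetic in size_t, with explicit wrap checks.
    size_t NewSegmentSize(size_t old_size, int size) {
      const size_t usize = static_cast<size_t>(size);
      const size_t no_overhead = usize + (old_size << 1);
      size_t new_size = kOverhead + no_overhead;
      if (no_overhead < usize || new_size < kOverhead) return 0;  // wrapped
      if (new_size < kMinSegment) {
        new_size = kMinSegment;
      } else if (new_size > kMaxSegment) {
        // Still large enough for the request itself, like Max(min_new_size,
        // kMaximumSegmentSize) in the patch.
        new_size = kOverhead + usize > kMaxSegment ? kOverhead + usize
                                                   : kMaxSegment;
      }
      if (new_size > INT_MAX) return 0;   // the allocator takes an int
      return new_size;
    }

    int main() {
      std::printf("%d\n", HasRoom(0x2008, 0x2000, 16));       // 0: past limit
      std::printf("%zu\n", NewSegmentSize(64 * 1024, 4096));  // normal growth
      std::printf("%zu\n", NewSegmentSize(SIZE_MAX / 2, 64)); // 0: wrapped
      return 0;
    }
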
- Address result = position_; - -- if (size > limit_ - position_) { -+ const uintptr_t limit = reinterpret_cast(limit_); -+ const uintptr_t position = reinterpret_cast(position_); -+ // position_ > limit_ can be true after the alignment correction above. -+ if (limit < position || (size_t) size > limit - position) { - result = NewExpand(size); - } else { - position_ += size; diff --git a/v8-3.14.5.10-REPLACE_INVALID_UTF8.patch b/v8-3.14.5.10-REPLACE_INVALID_UTF8.patch deleted file mode 100644 index 28db0dd..0000000 --- a/v8-3.14.5.10-REPLACE_INVALID_UTF8.patch +++ /dev/null @@ -1,195 +0,0 @@ -diff -up v8-3.14.5.10/include/v8.h.riu v8-3.14.5.10/include/v8.h ---- v8-3.14.5.10/include/v8.h.riu 2015-09-21 13:31:44.871068685 -0400 -+++ v8-3.14.5.10/include/v8.h 2015-09-21 13:32:08.884868178 -0400 -@@ -1076,7 +1076,11 @@ class String : public Primitive { - NO_OPTIONS = 0, - HINT_MANY_WRITES_EXPECTED = 1, - NO_NULL_TERMINATION = 2, -- PRESERVE_ASCII_NULL = 4 -+ PRESERVE_ASCII_NULL = 4, -+ // Used by WriteUtf8 to replace orphan surrogate code units with the -+ // unicode replacement character. Needs to be set to guarantee valid UTF-8 -+ // output. -+ REPLACE_INVALID_UTF8 = 8 - }; - - // 16-bit character codes. -diff -up v8-3.14.5.10/src/api.cc.riu v8-3.14.5.10/src/api.cc ---- v8-3.14.5.10/src/api.cc.riu 2015-09-21 13:33:24.604235945 -0400 -+++ v8-3.14.5.10/src/api.cc 2015-09-21 13:37:21.428258540 -0400 -@@ -3759,7 +3759,8 @@ static int RecursivelySerializeToUtf8(i: - int end, - int recursion_budget, - int32_t previous_character, -- int32_t* last_character) { -+ int32_t* last_character, -+ bool replace_invalid_utf8) { - int utf8_bytes = 0; - while (true) { - if (string->IsAsciiRepresentation()) { -@@ -3775,7 +3776,10 @@ static int RecursivelySerializeToUtf8(i: - for (int i = start; i < end; i++) { - uint16_t character = data[i]; - current += -- unibrow::Utf8::Encode(current, character, previous_character); -+ unibrow::Utf8::Encode(current, -+ character, -+ previous_character, -+ replace_invalid_utf8); - previous_character = character; - } - *last_character = previous_character; -@@ -3788,7 +3792,10 @@ static int RecursivelySerializeToUtf8(i: - for (int i = start; i < end; i++) { - uint16_t character = data[i]; - current += -- unibrow::Utf8::Encode(current, character, previous_character); -+ unibrow::Utf8::Encode(current, -+ character, -+ previous_character, -+ replace_invalid_utf8); - previous_character = character; - } - *last_character = previous_character; -@@ -3824,7 +3831,8 @@ static int RecursivelySerializeToUtf8(i: - boundary, - recursion_budget - 1, - previous_character, -- &previous_character); -+ &previous_character, -+ replace_invalid_utf8); - if (extra_utf8_bytes < 0) return extra_utf8_bytes; - buffer += extra_utf8_bytes; - utf8_bytes += extra_utf8_bytes; -@@ -3879,7 +3887,10 @@ int String::WriteUtf8(char* buffer, - return len; - } - -- if (capacity == -1 || capacity / 3 >= string_length) { -+ bool replace_invalid_utf8 = (options & REPLACE_INVALID_UTF8); -+ int max16BitCodeUnitSize = unibrow::Utf8::kMax16BitCodeUnitSize; -+ -+ if (capacity == -1 || capacity / max16BitCodeUnitSize >= string_length) { - int32_t previous = unibrow::Utf16::kNoPreviousCharacter; - const int kMaxRecursion = 100; - int utf8_bytes = -@@ -3889,7 +3900,8 @@ int String::WriteUtf8(char* buffer, - string_length, - kMaxRecursion, - previous, -- &previous); -+ &previous, -+ replace_invalid_utf8); - if (utf8_bytes >= 0) { - // Success serializing with recursion. 
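
The REPLACE_INVALID_UTF8 flag threaded through WriteUtf8 in this patch boils down to one rule: an unpaired surrogate code unit is emitted as U+FFFD (three bytes) instead of as an invalid sequence, which is also why kMax16BitCodeUnitSize further down is 3. A simplified standalone converter showing the same policy (this is not V8's unibrow code, just a sketch of the rule):

    #include <cstdint>
    #include <cstdio>
    #include <string>

    std::string ToUtf8(const std::u16string& in) {
      std::string out;
      for (size_t i = 0; i < in.size(); ++i) {
        uint32_t c = in[i];
        if (c >= 0xD800 && c <= 0xDBFF && i + 1 < in.size() &&
            in[i + 1] >= 0xDC00 && in[i + 1] <= 0xDFFF) {
          // Valid surrogate pair: combine into one supplementary code point.
          c = 0x10000 + ((c - 0xD800) << 10) + (in[i + 1] - 0xDC00);
          ++i;
        } else if (c >= 0xD800 && c <= 0xDFFF) {
          c = 0xFFFD;  // orphan surrogate: replace, never emit raw
        }
        if (c < 0x80) {
          out += static_cast<char>(c);
        } else if (c < 0x800) {
          out += static_cast<char>(0xC0 | (c >> 6));
          out += static_cast<char>(0x80 | (c & 0x3F));
        } else if (c < 0x10000) {
          out += static_cast<char>(0xE0 | (c >> 12));
          out += static_cast<char>(0x80 | ((c >> 6) & 0x3F));
          out += static_cast<char>(0x80 | (c & 0x3F));
        } else {
          out += static_cast<char>(0xF0 | (c >> 18));
          out += static_cast<char>(0x80 | ((c >> 12) & 0x3F));
          out += static_cast<char>(0x80 | ((c >> 6) & 0x3F));
          out += static_cast<char>(0x80 | (c & 0x3F));
        }
      }
      return out;
    }

    int main() {
      std::u16string lone = {0xD800};          // lone lead surrogate
      std::printf("%zu bytes\n", ToUtf8(lone).size());  // 3: EF BF BD
      std::u16string pair = {0xD83D, 0xDE00};  // valid surrogate pair
      std::printf("%zu bytes\n", ToUtf8(pair).size());  // 4
      return 0;
    }

A valid pair combines into one 4-byte sequence, two bytes fewer than two unpaired units would cost, which matches the kBytesSavedByCombiningSurrogates accounting in unicode.h.
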
- if ((options & NO_NULL_TERMINATION) == 0 && -@@ -3942,14 +3954,16 @@ int String::WriteUtf8(char* buffer, - char intermediate[unibrow::Utf8::kMaxEncodedSize]; - for (; i < len && pos < capacity; i++) { - i::uc32 c = write_input_buffer.GetNext(); -- if (unibrow::Utf16::IsTrailSurrogate(c) && -- unibrow::Utf16::IsLeadSurrogate(previous)) { -+ if (unibrow::Utf16::IsSurrogatePair(previous, c)) { - // We can't use the intermediate buffer here because the encoding - // of surrogate pairs is done under assumption that you can step - // back and fix the UTF8 stream. Luckily we only need space for one - // more byte, so there is always space. - ASSERT(pos < capacity); -- int written = unibrow::Utf8::Encode(buffer + pos, c, previous); -+ int written = unibrow::Utf8::Encode(buffer + pos, -+ c, -+ previous, -+ replace_invalid_utf8); - ASSERT(written == 1); - pos += written; - nchars++; -@@ -3957,7 +3971,8 @@ int String::WriteUtf8(char* buffer, - int written = - unibrow::Utf8::Encode(intermediate, - c, -- unibrow::Utf16::kNoPreviousCharacter); -+ unibrow::Utf16::kNoPreviousCharacter, -+ replace_invalid_utf8); - if (pos + written <= capacity) { - for (int j = 0; j < written; j++) { - buffer[pos + j] = intermediate[j]; -diff -up v8-3.14.5.10/src/unicode.h.riu v8-3.14.5.10/src/unicode.h ---- v8-3.14.5.10/src/unicode.h.riu 2015-09-21 13:38:50.617513839 -0400 -+++ v8-3.14.5.10/src/unicode.h 2015-09-21 13:40:32.019667163 -0400 -@@ -117,6 +117,9 @@ class Buffer { - - class Utf16 { - public: -+ static inline bool IsSurrogatePair(int lead, int trail) { -+ return IsLeadSurrogate(lead) && IsTrailSurrogate(trail); -+ } - static inline bool IsLeadSurrogate(int code) { - if (code == kNoPreviousCharacter) return false; - return (code & 0xfc00) == 0xd800; -@@ -152,13 +155,19 @@ class Utf16 { - class Utf8 { - public: - static inline uchar Length(uchar chr, int previous); -- static inline unsigned Encode( -- char* out, uchar c, int previous); -+ static inline unsigned Encode(char* out, -+ uchar c, -+ int previous, -+ bool replace_invalid = false); - static const byte* ReadBlock(Buffer str, byte* buffer, - unsigned capacity, unsigned* chars_read, unsigned* offset); - static uchar CalculateValue(const byte* str, - unsigned length, - unsigned* cursor); -+ -+ -+ // The unicode replacement character, used to signal invalid unicode -+ // sequences (e.g. an orphan surrogate) when converting to a UTF-8 encoding. - static const uchar kBadChar = 0xFFFD; - static const unsigned kMaxEncodedSize = 4; - static const unsigned kMaxOneByteChar = 0x7f; -@@ -170,6 +179,9 @@ class Utf8 { - // that match are coded as a 4 byte UTF-8 sequence. - static const unsigned kBytesSavedByCombiningSurrogates = 2; - static const unsigned kSizeOfUnmatchedSurrogate = 3; -+ // The maximum size a single UTF-16 code unit may take up when encoded as -+ // UTF-8. 
-+ static const unsigned kMax16BitCodeUnitSize = 3; - - private: - template friend class Utf8InputBuffer; -diff -up v8-3.14.5.10/src/unicode-inl.h.riu v8-3.14.5.10/src/unicode-inl.h ---- v8-3.14.5.10/src/unicode-inl.h.riu 2015-09-21 13:37:36.002136853 -0400 -+++ v8-3.14.5.10/src/unicode-inl.h 2015-09-21 13:38:41.566589411 -0400 -@@ -79,7 +79,10 @@ template int Mapping> 12); - str[1] = 0x80 | ((c >> 6) & kMask); diff --git a/v8-3.14.5.10-abort-uncaught-exception.patch b/v8-3.14.5.10-abort-uncaught-exception.patch deleted file mode 100644 index b2f2a08..0000000 --- a/v8-3.14.5.10-abort-uncaught-exception.patch +++ /dev/null @@ -1,139 +0,0 @@ -From fbff7054a47551387a99244e2cf0631f30406798 Mon Sep 17 00:00:00 2001 -From: Trevor Norris -Date: Tue, 18 Nov 2014 16:37:54 -0800 -Subject: [PATCH] v8: add api for aborting on uncaught exception - -Add v8::Isolate::SetAbortOnUncaughtException() so the user can be -notified when an uncaught exception has bubbled. - -PR-URL: https://github.com/joyent/node/pull/8666 -Reviewed-by: Trevor Norris ---- - include/v8.h | 11 +++++++++++ - src/api.cc | 5 +++++ - src/isolate.cc | 33 +++++++++++++++++++++++---------- - src/isolate.h | 5 +++++ - 4 files changed, 44 insertions(+), 10 deletions(-) - -diff --git a/include/v8.h b/include/v8.h -index 71a0d01..e229ed9 100644 ---- a/include/v8.h -+++ b/include/v8.h -@@ -2842,6 +2842,17 @@ class V8EXPORT Isolate { - static Isolate* GetCurrent(); - - /** -+ * Custom callback used by embedders to help V8 determine if it should abort -+ * when it throws and no internal handler can catch the exception. -+ * If FLAG_abort_on_uncaught_exception is true, then V8 will abort if either: -+ * - no custom callback is set. -+ * - the custom callback set returns true. -+ * Otherwise it won't abort. -+ */ -+ typedef bool (*abort_on_uncaught_exception_t)(); -+ void SetAbortOnUncaughtException(abort_on_uncaught_exception_t callback); -+ -+ /** - * Methods below this point require holding a lock (using Locker) in - * a multi-threaded environment. - */ -diff --git a/src/api.cc b/src/api.cc -index 96d564f..4b1aa67 100644 ---- a/src/api.cc -+++ b/src/api.cc -@@ -5550,6 +5550,11 @@ void Isolate::Enter() { - isolate->Enter(); - } - -+void Isolate::SetAbortOnUncaughtException( -+ abort_on_uncaught_exception_t callback) { -+ i::Isolate* isolate = reinterpret_cast(this); -+ isolate->SetAbortOnUncaughtException(callback); -+} - - void Isolate::Exit() { - i::Isolate* isolate = reinterpret_cast(this); -diff --git a/src/isolate.cc b/src/isolate.cc -index 5a5293e..0b38616 100644 ---- a/src/isolate.cc -+++ b/src/isolate.cc -@@ -1152,18 +1152,26 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) { - thread_local_top()->pending_message_end_pos_ = location->end_pos(); - } - -- // If the abort-on-uncaught-exception flag is specified, abort on any -- // exception not caught by JavaScript, even when an external handler is -- // present. This flag is intended for use by JavaScript developers, so -- // print a user-friendly stack trace (not an internal one). -+ // If the abort-on-uncaught-exception flag is specified, and if the -+ // exception is not caught by JavaScript (even when an external handler is -+ // present). 
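
The new Isolate::SetAbortOnUncaughtException API added here gives the embedder a veto over --abort-on-uncaught-exception: V8 aborts only if no callback is set, or if the set callback returns true. The gate is small enough to sketch outside of V8 (the domain-based policy is hypothetical):

    #include <cstdio>
    #include <cstdlib>

    typedef bool (*AbortOnUncaughtExceptionCallback)();
    static AbortOnUncaughtExceptionCallback g_callback = NULL;

    void SetAbortOnUncaughtException(AbortOnUncaughtExceptionCallback cb) {
      g_callback = cb;
    }

    // Mirrors the gate in Isolate::DoThrow: abort if no callback is set,
    // or if the callback agrees.
    void OnUncaughtException() {
      bool should_abort = !g_callback || g_callback();
      if (should_abort) {
        std::fprintf(stderr, "uncaught exception\n");
        std::abort();
      }
    }

    // Hypothetical embedder policy: never abort while inside a domain.
    static bool InsideDomain() { return true; }
    static bool ShouldAbort() { return !InsideDomain(); }

    int main() {
      SetAbortOnUncaughtException(ShouldAbort);
      OnUncaughtException();            // callback says no: process survives
      std::puts("process still alive");
      return 0;
    }
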
- if (fatal_exception_depth == 0 && - FLAG_abort_on_uncaught_exception && - (report_exception || can_be_caught_externally)) { -- fatal_exception_depth++; -- fprintf(stderr, "%s\n\nFROM\n", -- *MessageHandler::GetLocalizedMessage(message_obj)); -- PrintCurrentStackTrace(stderr); -- OS::Abort(); -+ // If the embedder didn't specify a custom uncaught exception callback, -+ // or if the custom callback determined that V8 should abort, then -+ // abort -+ bool should_abort = !abort_on_uncaught_exception_callback_ || -+ abort_on_uncaught_exception_callback_(); -+ if (should_abort) { -+ fatal_exception_depth++; -+ // This flag is intended for use by JavaScript developers, so -+ // print a user-friendly stack trace (not an internal one). -+ fprintf(stderr, "%s\n\nFROM\n", -+ *MessageHandler::GetLocalizedMessage(message_obj)); -+ PrintCurrentStackTrace(stderr); -+ OS::Abort(); -+ } - } - } else if (location != NULL && !location->script().is_null()) { - // We are bootstrapping and caught an error where the location is set -@@ -1339,6 +1347,10 @@ void Isolate::SetCaptureStackTraceForUncaughtExceptions( - stack_trace_for_uncaught_exceptions_options_ = options; - } - -+void Isolate::SetAbortOnUncaughtException( -+ v8::Isolate::abort_on_uncaught_exception_t callback) { -+ abort_on_uncaught_exception_callback_ = callback; -+} - - bool Isolate::is_out_of_memory() { - if (has_pending_exception()) { -@@ -1534,7 +1546,8 @@ Isolate::Isolate() - date_cache_(NULL), - context_exit_happened_(false), - deferred_handles_head_(NULL), -- optimizing_compiler_thread_(this) { -+ optimizing_compiler_thread_(this), -+ abort_on_uncaught_exception_callback_(NULL) { - TRACE_ISOLATE(constructor); - - memset(isolate_addresses_, 0, -diff --git a/src/isolate.h b/src/isolate.h -index 2769ca7..8719aa1 100644 ---- a/src/isolate.h -+++ b/src/isolate.h -@@ -692,6 +692,9 @@ class Isolate { - int frame_limit, - StackTrace::StackTraceOptions options); - -+ typedef bool (*abort_on_uncaught_exception_t)(); -+ void SetAbortOnUncaughtException(abort_on_uncaught_exception_t callback); -+ - // Tells whether the current context has experienced an out of memory - // exception. - bool is_out_of_memory(); -@@ -1292,6 +1295,8 @@ class Isolate { - DeferredHandles* deferred_handles_head_; - OptimizingCompilerThread optimizing_compiler_thread_; - -+ abort_on_uncaught_exception_t abort_on_uncaught_exception_callback_; -+ - friend class ExecutionAccess; - friend class HandleScopeImplementer; - friend class IsolateInitializer; diff --git a/v8-3.14.5.10-busy-loop.patch b/v8-3.14.5.10-busy-loop.patch deleted file mode 100644 index fcc906c..0000000 --- a/v8-3.14.5.10-busy-loop.patch +++ /dev/null @@ -1,137 +0,0 @@ -From 6ebd85e10535dfaa9181842fe73834e51d4d3e6c Mon Sep 17 00:00:00 2001 -From: Ben Noordhuis -Date: Thu, 27 Nov 2014 07:15:54 +0100 -Subject: [PATCH] v8: don't busy loop in cpu profiler thread - -Reduce the overhead of the CPU profiler by replacing sched_yield() with -nanosleep() in V8's tick event processor thread. The former only yields -the CPU when there is another process scheduled on the same CPU. - -Before this commit, the thread would effectively busy loop and consume -100% CPU time. By forcing a one nanosecond sleep period rounded up to -the task scheduler's granularity (about 50 us on Linux), CPU usage for -the processor thread now hovers around 10-20% for a busy application. 
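
The replacement the hunks further down make is only two lines; here it is standalone and runnable (the loop in main just demonstrates that each call now genuinely sleeps for one scheduler tick instead of spinning):

    #include <time.h>

    static void YieldCPU() {
      const struct timespec delay = { 0, 1 };  // 1 ns, rounded up by kernel
      nanosleep(&delay, NULL);
    }

    int main() {
      for (int i = 0; i < 100; ++i) YieldCPU();  // ~5 ms total at 50 us each
      return 0;
    }
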
- -PR-URL: https://github.com/joyent/node/pull/8789 -Ref: https://github.com/strongloop/strong-agent/issues/3 -Reviewed-by: Trevor Norris ---- - src/platform-freebsd.cc | 5 ----- - src/platform-linux.cc | 5 ----- - src/platform-macos.cc | 5 ----- - src/platform-openbsd.cc | 5 ----- - src/platform-posix.cc | 6 ++++++ - src/platform-solaris.cc | 5 ----- - tools/gyp/v8.gyp | 2 +- - 7 files changed, 7 insertions(+), 26 deletions(-) - -diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc -index 511759c..5c90c6b 100644 ---- a/src/platform-freebsd.cc -+++ b/src/platform-freebsd.cc -@@ -539,11 +539,6 @@ void Thread::SetThreadLocal(LocalStorageKey key, void* value) { - } - - --void Thread::YieldCPU() { -- sched_yield(); --} -- -- - class FreeBSDMutex : public Mutex { - public: - FreeBSDMutex() { -diff --git a/src/platform-linux.cc b/src/platform-linux.cc -index beb2cce..3d6b304 100644 ---- a/src/platform-linux.cc -+++ b/src/platform-linux.cc -@@ -812,11 +812,6 @@ void Thread::SetThreadLocal(LocalStorageKey key, void* value) { - } - - --void Thread::YieldCPU() { -- sched_yield(); --} -- -- - class LinuxMutex : public Mutex { - public: - LinuxMutex() { -diff --git a/src/platform-macos.cc b/src/platform-macos.cc -index a216f6e..e54e3e4 100644 ---- a/src/platform-macos.cc -+++ b/src/platform-macos.cc -@@ -640,11 +640,6 @@ void Thread::SetThreadLocal(LocalStorageKey key, void* value) { - } - - --void Thread::YieldCPU() { -- sched_yield(); --} -- -- - class MacOSMutex : public Mutex { - public: - MacOSMutex() { -diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc -index 408d4dc..72167de 100644 ---- a/src/platform-openbsd.cc -+++ b/src/platform-openbsd.cc -@@ -593,11 +593,6 @@ void Thread::SetThreadLocal(LocalStorageKey key, void* value) { - } - - --void Thread::YieldCPU() { -- sched_yield(); --} -- -- - class OpenBSDMutex : public Mutex { - public: - OpenBSDMutex() { -diff --git a/src/platform-posix.cc b/src/platform-posix.cc -index 5c3529d..8aecd56 100644 ---- a/src/platform-posix.cc -+++ b/src/platform-posix.cc -@@ -392,6 +392,12 @@ void OS::StrNCpy(Vector dest, const char* src, size_t n) { - } - - -+void Thread::YieldCPU() { -+ const timespec delay = { 0, 1 }; -+ nanosleep(&delay, NULL); -+} -+ -+ - // ---------------------------------------------------------------------------- - // POSIX socket support. 
- // -diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc -index 07718fe..4e95ecc 100644 ---- a/src/platform-solaris.cc -+++ b/src/platform-solaris.cc -@@ -527,11 +527,6 @@ void Thread::SetThreadLocal(LocalStorageKey key, void* value) { - } - - --void Thread::YieldCPU() { -- sched_yield(); --} -- -- - class SolarisMutex : public Mutex { - public: - SolarisMutex() { -diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp -index 71cf366..c304925 100644 ---- a/tools/gyp/v8.gyp -+++ b/tools/gyp/v8.gyp -@@ -715,7 +715,7 @@ - ['OS=="solaris"', { - 'link_settings': { - 'libraries': [ -- '-lsocket -lnsl', -+ '-lsocket -lnsl -lrt', - ]}, - 'sources': [ - '../../src/platform-solaris.cc', diff --git a/v8-3.14.5.10-enumeration.patch b/v8-3.14.5.10-enumeration.patch deleted file mode 100644 index 4dea2a5..0000000 --- a/v8-3.14.5.10-enumeration.patch +++ /dev/null @@ -1,30 +0,0 @@ -From 196184d332ba2d2defc56ad0b37653659a7d3ec0 Mon Sep 17 00:00:00 2001 -From: "svenpanne@chromium.org" -Date: Fri, 9 Nov 2012 11:30:05 +0000 -Subject: [PATCH] v8: backport codereview.chromium.org/11362182 - -Keep the number of descriptors below -DescriptorArray::kMaxNumberOfDescriptors even for accessors - -Review URL: https://codereview.chromium.org/11362182 ---- - src/objects.cc | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - -diff --git a/src/objects.cc b/src/objects.cc ---- a/src/objects.cc -+++ b/src/objects.cc -@@ -4453,7 +4453,9 @@ MaybeObject* JSObject::DefinePropertyAccessor(String* name, - // to do a lookup, which seems to be a bit of overkill. - Heap* heap = GetHeap(); - bool only_attribute_changes = getter->IsNull() && setter->IsNull(); -- if (HasFastProperties() && !only_attribute_changes) { -+ if (HasFastProperties() && !only_attribute_changes && -+ (map()->NumberOfOwnDescriptors() < -+ DescriptorArray::kMaxNumberOfDescriptors)) { - MaybeObject* getterOk = heap->undefined_value(); - if (!getter->IsNull()) { - getterOk = DefineFastAccessor(name, ACCESSOR_GETTER, getter, attributes); --- -1.8.5.1 - diff --git a/v8-3.14.5.10-gcc7.patch b/v8-3.14.5.10-gcc7.patch deleted file mode 100644 index f929b4a..0000000 --- a/v8-3.14.5.10-gcc7.patch +++ /dev/null @@ -1,223 +0,0 @@ -diff -up v8-3.14.5.10/src/arm/assembler-arm.cc.gcc7 v8-3.14.5.10/src/arm/assembler-arm.cc -diff -up v8-3.14.5.10/src/deoptimizer.cc.gcc7 v8-3.14.5.10/src/deoptimizer.cc ---- v8-3.14.5.10/src/deoptimizer.cc.gcc7 2012-10-22 09:09:53.000000000 -0400 -+++ v8-3.14.5.10/src/deoptimizer.cc 2017-02-28 16:55:25.553045035 -0500 -@@ -1141,7 +1141,7 @@ bool Deoptimizer::DoOsrTranslateCommand( - } - output->SetRegister(output_reg, static_cast(uint32_value)); - } -- -+ // intentional fallthrough - - case Translation::DOUBLE_REGISTER: { - // Abort OSR if we don't have a number. 
-diff -up v8-3.14.5.10/src/ia32/assembler-ia32.cc.gcc7 v8-3.14.5.10/src/ia32/assembler-ia32.cc ---- v8-3.14.5.10/src/ia32/assembler-ia32.cc.gcc7 2017-03-01 10:21:11.271775490 -0500 -+++ v8-3.14.5.10/src/ia32/assembler-ia32.cc 2017-03-01 10:21:44.242983779 -0500 -@@ -420,6 +420,7 @@ void Assembler::Nop(int bytes) { - switch (bytes) { - case 2: - EMIT(0x66); -+ // intentional fallthrough - case 1: - EMIT(0x90); - return; -@@ -436,6 +437,7 @@ void Assembler::Nop(int bytes) { - return; - case 6: - EMIT(0x66); -+ // intentional fallthrough - case 5: - EMIT(0xf); - EMIT(0x1f); -@@ -456,12 +458,15 @@ void Assembler::Nop(int bytes) { - case 11: - EMIT(0x66); - bytes--; -+ // intentional fallthrough - case 10: - EMIT(0x66); - bytes--; -+ // intentional fallthrough - case 9: - EMIT(0x66); - bytes--; -+ // intentional fallthrough - case 8: - EMIT(0xf); - EMIT(0x1f); -diff -up v8-3.14.5.10/src/ic.cc.gcc7 v8-3.14.5.10/src/ic.cc ---- v8-3.14.5.10/src/ic.cc.gcc7 2012-10-22 09:09:53.000000000 -0400 -+++ v8-3.14.5.10/src/ic.cc 2017-02-28 16:55:25.554045011 -0500 -@@ -1989,8 +1989,8 @@ void KeyedStoreIC::UpdateCaches(LookupRe - name, receiver, field_index, transition, strict_mode); - break; - } -- // fall through. - } -+ // intentional fallthrough - case NORMAL: - case CONSTANT_FUNCTION: - case CALLBACKS: -diff -up v8-3.14.5.10/src/objects.cc.gcc7 v8-3.14.5.10/src/objects.cc ---- v8-3.14.5.10/src/objects.cc.gcc7 2017-02-28 16:55:25.516045908 -0500 -+++ v8-3.14.5.10/src/objects.cc 2017-02-28 16:55:25.555044988 -0500 -@@ -10302,7 +10302,7 @@ void JSObject::GetElementsCapacityAndUsa - *used = Smi::cast(JSArray::cast(this)->length())->value(); - break; - } -- // Fall through if packing is not guaranteed. -+ // intentional fallthrough - case FAST_HOLEY_SMI_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - backing_store = FixedArray::cast(backing_store_base); -@@ -10324,7 +10324,7 @@ void JSObject::GetElementsCapacityAndUsa - *used = Smi::cast(JSArray::cast(this)->length())->value(); - break; - } -- // Fall through if packing is not guaranteed. -+ // intentional fallthrough - case FAST_HOLEY_DOUBLE_ELEMENTS: { - FixedDoubleArray* elms = FixedDoubleArray::cast(elements()); - *capacity = elms->length(); -diff -up v8-3.14.5.10/src/objects.h.gcc7 v8-3.14.5.10/src/objects.h ---- v8-3.14.5.10/src/objects.h.gcc7 2017-02-28 16:55:25.517045885 -0500 -+++ v8-3.14.5.10/src/objects.h 2017-02-28 16:55:25.556044964 -0500 -@@ -2785,24 +2785,10 @@ class HashTable: public FixedArray { - USE_CUSTOM_MINIMUM_CAPACITY - }; - -- // Wrapper methods -- inline uint32_t Hash(Key key) { -- if (Shape::UsesSeed) { -- return Shape::SeededHash(key, -- GetHeap()->HashSeed()); -- } else { -- return Shape::Hash(key); -- } -- } -- -- inline uint32_t HashForObject(Key key, Object* object) { -- if (Shape::UsesSeed) { -- return Shape::SeededHashForObject(key, -- GetHeap()->HashSeed(), object); -- } else { -- return Shape::HashForObject(key, object); -- } -- } -+ // Wrapper methods. Defined in src/objects-inl.h -+ // to break a cycle with src/heap/heap.h. -+ inline uint32_t Hash(Key key); -+ inline uint32_t HashForObject(Key key, Object* object); - - // Returns the number of elements in the hash table. 
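
Moving Hash and HashForObject out of the class body, as the objects.h/objects-inl.h hunks below do, is the stock cure for a header cycle: the declaring header keeps only signatures, and the definitions live in the -inl.h header, which is free to include heap.h. In miniature (Shape, IntShape and the hash constant are invented for the demo):

    #include <cstdint>
    #include <cstdio>

    // "objects.h" side: declaration only; this header never sees heap.h.
    template <typename Shape, typename Key>
    struct HashTable {
      inline uint32_t Hash(Key key);  // body lives in the -inl.h header
    };

    // "objects-inl.h" side: the definition may use whatever heap.h provides.
    template <typename Shape, typename Key>
    uint32_t HashTable<Shape, Key>::Hash(Key key) {
      return Shape::Hash(key);
    }

    struct IntShape {
      static uint32_t Hash(int key) {
        return static_cast<uint32_t>(key) * 2654435761u;
      }
    };

    int main() {
      HashTable<IntShape, int> table;
      std::printf("%u\n", table.Hash(42));
      return 0;
    }
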
- int NumberOfElements() { -diff -up v8-3.14.5.10/src/objects-inl.h.gcc7 v8-3.14.5.10/src/objects-inl.h ---- v8-3.14.5.10/src/objects-inl.h.gcc7 2017-02-28 16:55:25.517045885 -0500 -+++ v8-3.14.5.10/src/objects-inl.h 2017-02-28 16:55:25.556044964 -0500 -@@ -52,6 +52,26 @@ - namespace v8 { - namespace internal { - -+template -+uint32_t HashTable::Hash(Key key) { -+ if (Shape::UsesSeed) { -+ return Shape::SeededHash(key, -+ GetHeap()->HashSeed()); -+ } else { -+ return Shape::Hash(key); -+ } -+} -+ -+template -+uint32_t HashTable::HashForObject(Key key, Object* object) { -+ if (Shape::UsesSeed) { -+ return Shape::SeededHashForObject(key, -+ GetHeap()->HashSeed(), object); -+ } else { -+ return Shape::HashForObject(key, object); -+ } -+} -+ - PropertyDetails::PropertyDetails(Smi* smi) { - value_ = smi->value(); - } -diff -up v8-3.14.5.10/src/parser.cc.gcc7 v8-3.14.5.10/src/parser.cc ---- v8-3.14.5.10/src/parser.cc.gcc7 2017-02-28 16:55:25.450047466 -0500 -+++ v8-3.14.5.10/src/parser.cc 2017-02-28 16:55:25.557044941 -0500 -@@ -3649,8 +3649,7 @@ Expression* Parser::ParsePrimaryExpressi - result = ParseV8Intrinsic(CHECK_OK); - break; - } -- // If we're not allowing special syntax we fall-through to the -- // default case. -+ // intentional fallthrough - - default: { - Token::Value tok = Next(); -@@ -5376,8 +5375,8 @@ RegExpTree* RegExpParser::ParseDisjuncti - if (ParseIntervalQuantifier(&dummy, &dummy)) { - ReportError(CStrVector("Nothing to repeat") CHECK_FAILED); - } -- // fallthrough - } -+ // intentional fallthrough - default: - builder->AddCharacter(current()); - Advance(); -diff -up v8-3.14.5.10/src/spaces.h.gcc7 v8-3.14.5.10/src/spaces.h ---- v8-3.14.5.10/src/spaces.h.gcc7 2012-10-22 09:09:53.000000000 -0400 -+++ v8-3.14.5.10/src/spaces.h 2017-02-28 16:55:25.557044941 -0500 -@@ -2613,15 +2613,15 @@ class PointerChunkIterator BASE_EMBEDDED - return old_pointer_iterator_.next(); - } - state_ = kMapState; -- // Fall through. - } -+ // intentional fallthrough - case kMapState: { - if (map_iterator_.has_next()) { - return map_iterator_.next(); - } - state_ = kLargeObjectState; -- // Fall through. 
- } -+ // intentional fallthrough - case kLargeObjectState: { - HeapObject* heap_object; - do { -diff -up v8-3.14.5.10/src/x64/assembler-x64.cc.gcc7 v8-3.14.5.10/src/x64/assembler-x64.cc ---- v8-3.14.5.10/src/x64/assembler-x64.cc.gcc7 2017-03-01 10:19:40.086088012 -0500 -+++ v8-3.14.5.10/src/x64/assembler-x64.cc 2017-03-01 10:20:51.859241627 -0500 -@@ -1800,6 +1800,7 @@ void Assembler::Nop(int n) { - switch (n) { - case 2: - emit(0x66); -+ // intentional fallthrough - case 1: - emit(0x90); - return; -@@ -1816,6 +1817,7 @@ void Assembler::Nop(int n) { - return; - case 6: - emit(0x66); -+ // intentional fallthrough - case 5: - emit(0x0f); - emit(0x1f); -@@ -1836,12 +1838,15 @@ void Assembler::Nop(int n) { - case 11: - emit(0x66); - n--; -+ // intentional fallthrough - case 10: - emit(0x66); - n--; -+ // intentional fallthrough - case 9: - emit(0x66); - n--; -+ // intentional fallthrough - case 8: - emit(0x0f); - emit(0x1f); diff --git a/v8-3.14.5.10-gcc8.patch b/v8-3.14.5.10-gcc8.patch deleted file mode 100644 index ab6a2a3..0000000 --- a/v8-3.14.5.10-gcc8.patch +++ /dev/null @@ -1,12 +0,0 @@ -diff -up v8-3.14.5.10/src/frames.h.gcc8 v8-3.14.5.10/src/frames.h ---- v8-3.14.5.10/src/frames.h.gcc8 2018-05-18 14:41:15.119069125 -0400 -+++ v8-3.14.5.10/src/frames.h 2018-05-18 14:42:07.421810155 -0400 -@@ -67,7 +67,7 @@ class InnerPointerToCodeCache { - Code* GcSafeCastToCode(HeapObject* object, Address inner_pointer); - - void Flush() { -- memset(&cache_[0], 0, sizeof(cache_)); -+ memset(static_cast(&cache_[0]), 0, sizeof(cache_)); - } - - InnerPointerToCodeCacheEntry* GetCacheEntry(Address inner_pointer); diff --git a/v8-3.14.5.10-mem-corruption-stack-overflow.patch b/v8-3.14.5.10-mem-corruption-stack-overflow.patch deleted file mode 100644 index 452464b..0000000 --- a/v8-3.14.5.10-mem-corruption-stack-overflow.patch +++ /dev/null @@ -1,33 +0,0 @@ -From 530af9cb8e700e7596b3ec812bad123c9fa06356 Mon Sep 17 00:00:00 2001 -From: Fedor Indutny -Date: Wed, 30 Jul 2014 15:33:52 -0700 -Subject: [PATCH] v8: Interrupts must not mask stack overflow. - -Backport of https://codereview.chromium.org/339883002 ---- - src/isolate.h | 9 ++------- - 1 file changed, 2 insertions(+), 7 deletions(-) - -diff --git a/src/isolate.h b/src/isolate.h -index b90191d..2769ca7 100644 ---- a/src/isolate.h -+++ b/src/isolate.h -@@ -1392,14 +1392,9 @@ class StackLimitCheck BASE_EMBEDDED { - public: - explicit StackLimitCheck(Isolate* isolate) : isolate_(isolate) { } - -- bool HasOverflowed() const { -+ inline bool HasOverflowed() const { - StackGuard* stack_guard = isolate_->stack_guard(); -- // Stack has overflowed in C++ code only if stack pointer exceeds the C++ -- // stack guard and the limits are not set to interrupt values. -- // TODO(214): Stack overflows are ignored if a interrupt is pending. This -- // code should probably always use the initial C++ limit. 
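
The StackLimitCheck fix below replaces the interrupt-aware test with a plain comparison of the current C++ stack position against the real climit, which interrupt requests never lower, so a pending interrupt can no longer mask a genuine overflow. A sketch of the address-based check (assumes a downward-growing stack; the offsets are arbitrary):

    #include <cstdint>
    #include <cstdio>

    static uintptr_t real_climit;  // lowest usable stack address

    // The fixed check: where is the C++ stack right now, relative to the
    // real limit? A local's address stands in for the stack pointer.
    static bool HasOverflowed() {
      int dummy;
      return reinterpret_cast<uintptr_t>(&dummy) < real_climit;
    }

    int main() {
      int probe;
      real_climit = reinterpret_cast<uintptr_t>(&probe) - 64 * 1024;
      std::printf("overflowed: %d\n", HasOverflowed());  // 0: room left
      real_climit = reinterpret_cast<uintptr_t>(&probe) + 1024;
      std::printf("overflowed: %d\n", HasOverflowed());  // 1: past the limit
      return 0;
    }
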
-- return (reinterpret_cast(this) < stack_guard->climit()) && -- stack_guard->IsStackOverflow(); -+ return reinterpret_cast(this) < stack_guard->real_climit(); - } - private: - Isolate* isolate_; --- -2.0.3 diff --git a/v8-3.14.5.10-profiler-log.patch b/v8-3.14.5.10-profiler-log.patch deleted file mode 100644 index 2530e54..0000000 --- a/v8-3.14.5.10-profiler-log.patch +++ /dev/null @@ -1,43 +0,0 @@ -From 431eb172f97434a3b0868a610bc14d8ff7d9efd9 Mon Sep 17 00:00:00 2001 -From: Ben Noordhuis -Date: Fri, 16 Jan 2015 13:44:42 +0100 -Subject: [PATCH] deps: log V8 version in profiler log file - -Patch from issue 800293002 authored by ben@strongloop.com - -Review URL: https://codereview.chromium.org/806143002 - -PR-URL: https://github.com/joyent/node/pull/9043 -Reviewed-by: Trevor Norris -Reviewed-By: Timothy J Fontaine ---- - src/log-utils.cc | 9 +++++++++ - 1 file changed, 9 insertions(+) - -diff --git a/src/log-utils.cc b/src/log-utils.cc -index 5e607da..622cc51 100644 ---- a/src/log-utils.cc -+++ b/src/log-utils.cc -@@ -29,6 +29,7 @@ - - #include "log-utils.h" - #include "string-stream.h" -+#include "version.h" - - namespace v8 { - namespace internal { -@@ -136,6 +137,14 @@ void Log::Initialize() { - } - } - } -+ -+ if (output_handle_ != NULL) { -+ LogMessageBuilder msg(logger_); -+ msg.Append("v8-version,%d,%d,%d,%d,%d\n", Version::GetMajor(), -+ Version::GetMinor(), Version::GetBuild(), Version::GetPatch(), -+ Version::IsCandidate()); -+ msg.WriteToLogFile(); -+ } - } - - diff --git a/v8-3.14.5.10-report-builtins-by-name.patch b/v8-3.14.5.10-report-builtins-by-name.patch deleted file mode 100644 index 1d2410b..0000000 --- a/v8-3.14.5.10-report-builtins-by-name.patch +++ /dev/null @@ -1,16 +0,0 @@ -diff -up v8-3.14.5.10/src/log.cc.builtinnames v8-3.14.5.10/src/log.cc ---- v8-3.14.5.10/src/log.cc.builtinnames 2016-07-06 11:25:12.341766992 -0400 -+++ v8-3.14.5.10/src/log.cc 2016-07-06 11:25:41.065609632 -0400 -@@ -1485,7 +1485,11 @@ void Logger::LogCodeObject(Object* objec - tag = Logger::STUB_TAG; - break; - case Code::BUILTIN: -- description = "A builtin from the snapshot"; -+ description = -+ Isolate::Current()->builtins()->Lookup(code_object->entry()); -+ if (description == NULL) { -+ description = "A builtin from the snapshot"; -+ } - tag = Logger::BUILTIN_TAG; - break; - case Code::KEYED_LOAD_IC: diff --git a/v8-3.14.5.10-system-valgrind.patch b/v8-3.14.5.10-system-valgrind.patch deleted file mode 100644 index 1e5b0cb..0000000 --- a/v8-3.14.5.10-system-valgrind.patch +++ /dev/null @@ -1,44 +0,0 @@ -diff -up v8-3.14.5.10/src/ia32/cpu-ia32.cc.system-valgrind v8-3.14.5.10/src/ia32/cpu-ia32.cc ---- v8-3.14.5.10/src/ia32/cpu-ia32.cc.system-valgrind 2012-01-16 06:42:08.000000000 -0500 -+++ v8-3.14.5.10/src/ia32/cpu-ia32.cc 2014-12-02 15:15:07.819525430 -0500 -@@ -28,7 +28,7 @@ - // CPU specific code for ia32 independent of OS goes here. - - #ifdef __GNUC__ --#include "third_party/valgrind/valgrind.h" -+#include - #endif - - #include "v8.h" -@@ -67,8 +67,7 @@ void CPU::FlushICache(void* start, size_ - // solution is to run valgrind with --smc-check=all, but this comes at a big - // performance cost. We can notify valgrind to invalidate its cache. 
- #ifdef VALGRIND_DISCARD_TRANSLATIONS -- unsigned res = VALGRIND_DISCARD_TRANSLATIONS(start, size); -- USE(res); -+ VALGRIND_DISCARD_TRANSLATIONS(start, size); - #endif - } - -diff -up v8-3.14.5.10/src/x64/cpu-x64.cc.system-valgrind v8-3.14.5.10/src/x64/cpu-x64.cc ---- v8-3.14.5.10/src/x64/cpu-x64.cc.system-valgrind 2012-02-23 03:45:21.000000000 -0500 -+++ v8-3.14.5.10/src/x64/cpu-x64.cc 2014-12-02 15:14:51.289621074 -0500 -@@ -28,7 +28,7 @@ - // CPU specific code for x64 independent of OS goes here. - - #if defined(__GNUC__) && !defined(__MINGW64__) --#include "third_party/valgrind/valgrind.h" -+#include - #endif - - #include "v8.h" -@@ -67,8 +67,7 @@ void CPU::FlushICache(void* start, size_ - // solution is to run valgrind with --smc-check=all, but this comes at a big - // performance cost. We can notify valgrind to invalidate its cache. - #ifdef VALGRIND_DISCARD_TRANSLATIONS -- unsigned res = VALGRIND_DISCARD_TRANSLATIONS(start, size); -- USE(res); -+ VALGRIND_DISCARD_TRANSLATIONS(start, size); - #endif - } - diff --git a/v8-3.14.5.10-unhandled-ReferenceError.patch b/v8-3.14.5.10-unhandled-ReferenceError.patch deleted file mode 100644 index 7762fa8..0000000 --- a/v8-3.14.5.10-unhandled-ReferenceError.patch +++ /dev/null @@ -1,37 +0,0 @@ -From 0ff51c6e063e3eea9e4d9ea68edc82d935626fc7 Mon Sep 17 00:00:00 2001 -From: Julien Gilli -Date: Fri, 28 Nov 2014 15:33:35 -0800 -Subject: [PATCH] deps: backport 2ad2237 from v8 upstream - -Original commit message: - -Fix Unhandled ReferenceError in debug-debugger.js - -This fixes following exception in Sky on attempt to set a breakpoint -"Unhandled: Uncaught ReferenceError: break_point is not defined" -I think this happens in Sky but not in Chrome because Sky scripts are executed in strict mode. - -BUG=None -LOG=N -R=yangguo@chromium.org - -Review URL: https://codereview.chromium.org/741683002 - -Cr-Commit-Position: refs/heads/master@{#25415} ---- - src/debug-debugger.js | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - -diff --git a/src/debug-debugger.js b/src/debug-debugger.js -index dfad902..a27961f 100644 ---- a/src/debug-debugger.js -+++ b/src/debug-debugger.js -@@ -442,7 +442,7 @@ ScriptBreakPoint.prototype.set = function (script) { - if (position === null) return; - - // Create a break point object and set the break point. 
-- break_point = MakeBreakPoint(position, this); -+ var break_point = MakeBreakPoint(position, this); - break_point.setIgnoreCount(this.ignoreCount()); - var actual_position = %SetScriptBreakPoint(script, position, break_point); - if (IS_UNDEFINED(actual_position)) { diff --git a/v8-3.14.5.10-unused-local-typedefs.patch b/v8-3.14.5.10-unused-local-typedefs.patch deleted file mode 100644 index c168875..0000000 --- a/v8-3.14.5.10-unused-local-typedefs.patch +++ /dev/null @@ -1,39 +0,0 @@ -From 53b4accb6e5747b156be91a2b90f42607e33a7cc Mon Sep 17 00:00:00 2001 -From: Timothy J Fontaine -Date: Mon, 4 Aug 2014 13:43:50 -0700 -Subject: [PATCH] v8: Fix compliation with GCC 4.8 - -Supresses a very loud warning from GCC 4.8 about unused typedefs - -Original url https://codereview.chromium.org/69413002 ---- - src/checks.h | 9 ++++++++- - 1 file changed, 8 insertions(+), 1 deletion(-) - -diff --git a/src/checks.h b/src/checks.h -index d0a0c2b..4396ada 100644 ---- a/src/checks.h -+++ b/src/checks.h -@@ -230,6 +230,13 @@ inline void CheckNonEqualsHelper(const char* file, - #define CHECK_LE(a, b) CHECK((a) <= (b)) - - -+#if defined(__clang__) || defined(__GNUC__) -+# define V8_UNUSED __attribute__((unused)) -+#else -+# define V8_UNUSED -+#endif -+ -+ - // This is inspired by the static assertion facility in boost. This - // is pretty magical. If it causes you trouble on a platform you may - // find a fix in the boost code. -@@ -248,7 +255,7 @@ template class StaticAssertionHelper { }; - #define STATIC_CHECK(test) \ - typedef \ - StaticAssertionHelper((test))>)> \ -- SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__) -+ SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__) V8_UNUSED - - - extern bool FLAG_enable_slow_asserts; diff --git a/v8-3.14.5.10-use-clock_gettime.patch b/v8-3.14.5.10-use-clock_gettime.patch deleted file mode 100644 index cb25591..0000000 --- a/v8-3.14.5.10-use-clock_gettime.patch +++ /dev/null @@ -1,122 +0,0 @@ -From f9ced08de30c37838756e8227bd091f80ad9cafa Mon Sep 17 00:00:00 2001 -From: Ben Noordhuis -Date: Thu, 24 Apr 2014 04:27:40 +0200 -Subject: [PATCH] deps: make v8 use CLOCK_REALTIME_COARSE - -Date.now() indirectly calls gettimeofday() on Linux and that's a system -call that is extremely expensive on virtualized systems when the host -operating system has to emulate access to the hardware clock. - -Case in point: output from `perf record -c 10000 -e cycles:u -g -i` -for a benchmark/http_simple bytes/8 benchmark with a light load of -50 concurrent clients: - - 53.69% node node [.] v8::internal::OS::TimeCurrentMillis() - | - --- v8::internal::OS::TimeCurrentMillis() - | - |--99.77%-- v8::internal::Runtime_DateCurrentTime(v8::internal::Arguments, v8::internal::Isolate*) - | 0x23587880618e - -That's right - over half of user time spent inside the V8 function that -calls gettimeofday(). - -Notably, nearly all system time gets attributed to acpi_pm_read(), the -kernel function that reads the ACPI power management timer: - - 32.49% node [kernel.kallsyms] [k] acpi_pm_read - | - --- acpi_pm_read - | - |--98.40%-- __getnstimeofday - | getnstimeofday - | | - | |--71.61%-- do_gettimeofday - | | sys_gettimeofday - | | system_call_fastpath - | | 0x7fffbbaf6dbc - | | | - | | |--98.72%-- v8::internal::OS::TimeCurrentMillis() - -The cost of the gettimeofday() system call is normally measured in -nanoseconds but we were seeing 100 us averages and spikes >= 1000 us. 
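
The clock selection this commit message is building up to is small enough to show standalone. This sketch assumes headers new enough to define CLOCK_REALTIME_COARSE; the patch itself hard-codes clock id 5 to cope with older headers, as its comment explains:

    #include <stdio.h>
    #include <time.h>

    // Prefer the coarse clock when its resolution is <= 1 ms, so that
    // reads stay in the vDSO and never enter the kernel.
    static clockid_t PickClock() {
      struct timespec res;
      if (clock_getres(CLOCK_REALTIME_COARSE, &res) == 0 &&
          res.tv_sec == 0 && res.tv_nsec <= 1000 * 1000) {
        return CLOCK_REALTIME_COARSE;
      }
      return CLOCK_REALTIME;
    }

    int main() {
      struct timespec ts;
      if (clock_gettime(PickClock(), &ts) != 0) return 1;
      printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
      return 0;
    }
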
-The numbers were so bad, my initial hunch was that the node process was -continuously getting rescheduled inside the system call... - -v8::internal::OS::TimeCurrentMillis()'s most frequent caller is -v8::internal::Runtime_DateCurrentTime(), the V8 run-time function -that's behind Date.now(). The timeout handling logic in lib/http.js -and lib/net.js calls into lib/timers.js and that module will happily -call Date.now() hundreds or even thousands of times per second. -If you saw exports._unrefActive() show up in --prof output a lot, -now you know why. - -That's why this commit makes V8 switch over to clock_gettime() on Linux. -In particular, it checks if CLOCK_REALTIME_COARSE is available and has -a resolution <= 1 ms because in that case the clock_gettime() call can -be fully serviced from the vDSO. - -It speeds up the aforementioned benchmark by about 100% on the affected -systems and should go a long way toward addressing the latency issues -that StrongLoop customers have been reporting. - -This patch will be upstreamed as a CR against V8 3.26. I'm sending it -as a pull request for v0.10 first because that's what our users are -running and because the delta between 3.26 and 3.14 is too big to -reasonably back-port the patch. I'll open a pull request for the -master branch once the CR lands upstream. - -Signed-off-by: Trevor Norris -Signed-off-by: Fedor Indutny ---- - src/platform-posix.cc | 26 ++++++++++++++++++++++---- - 1 file changed, 22 insertions(+), 4 deletions(-) - -diff --git a/src/platform-posix.cc b/src/platform-posix.cc -index ad74eba..3c86868 100644 ---- a/src/platform-posix.cc -+++ b/src/platform-posix.cc -@@ -188,19 +188,37 @@ int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) { - - - double OS::TimeCurrentMillis() { -- struct timeval tv; -- if (gettimeofday(&tv, NULL) < 0) return 0.0; -- return (static_cast(tv.tv_sec) * 1000) + -- (static_cast(tv.tv_usec) / 1000); -+ return static_cast(Ticks()) / 1000; - } - - - int64_t OS::Ticks() { -+#if defined(__linux__) -+ static clockid_t clock_id = static_cast(-1); -+ struct timespec spec; -+ if (clock_id == static_cast(-1)) { -+ // CLOCK_REALTIME_COARSE may not be defined by the system headers but -+ // might still be supported by the kernel so use the clock id directly. -+ // Only use CLOCK_REALTIME_COARSE when its granularity <= 1 ms. -+ const clockid_t clock_realtime_coarse = 5; -+ if (clock_getres(clock_realtime_coarse, &spec) == 0 && -+ spec.tv_nsec <= 1000 * 1000) { -+ clock_id = clock_realtime_coarse; -+ } else { -+ clock_id = CLOCK_REALTIME; -+ } -+ } -+ if (clock_gettime(clock_id, &spec) != 0) { -+ return 0; // Not really possible. -+ } -+ return static_cast(spec.tv_sec) * 1000000 + (spec.tv_nsec / 1000); -+#else - // gettimeofday has microsecond resolution. - struct timeval tv; - if (gettimeofday(&tv, NULL) < 0) - return 0; - return (static_cast(tv.tv_sec) * 1000000) + tv.tv_usec; -+#endif - } - - --- -1.9.1 diff --git a/v8-3.14.5.10-x64-MathMinMax.patch b/v8-3.14.5.10-x64-MathMinMax.patch deleted file mode 100644 index e7b167d..0000000 --- a/v8-3.14.5.10-x64-MathMinMax.patch +++ /dev/null @@ -1,105 +0,0 @@ -From 3530fa9cd09f8db8101c4649cab03bcdf760c434 Mon Sep 17 00:00:00 2001 -From: Fedor Indutny -Date: Fri, 21 Dec 2012 17:52:00 +0000 -Subject: [PATCH] deps: backport 4ed5fde4f from v8 upstream - -Original commit message: - - Fix x64 MathMinMax for negative untagged int32 arguments. - - An untagged int32 has zeros in the upper half even if it is negative. 
- Using cmpq to compare such numbers will incorrectly ignore the sign. - - BUG=164442 - R=mvstanton@chromium.org - - Review URL: https://chromiumcodereview.appspot.com/11665007 - - git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@13273 ce2b1a6d-e550-0410-aec6-3dcde31c8c00 - -Signed-off-by: Fedor Indutny ---- - src/x64/lithium-codegen-x64.cc | 6 ++-- - test/mjsunit/regress/regress-164442.js | 45 ++++++++++++++++++++++++++ - 2 files changed, 48 insertions(+), 3 deletions(-) - create mode 100644 test/mjsunit/regress/regress-164442.js - -diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc -index b461e62..ff01f44 100644 ---- a/src/x64/lithium-codegen-x64.cc -+++ b/src/x64/lithium-codegen-x64.cc -@@ -1457,17 +1457,17 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) { - if (right->IsConstantOperand()) { - Immediate right_imm = - Immediate(ToInteger32(LConstantOperand::cast(right))); -- __ cmpq(left_reg, right_imm); -+ __ cmpl(left_reg, right_imm); - __ j(condition, &return_left, Label::kNear); - __ movq(left_reg, right_imm); - } else if (right->IsRegister()) { - Register right_reg = ToRegister(right); -- __ cmpq(left_reg, right_reg); -+ __ cmpl(left_reg, right_reg); - __ j(condition, &return_left, Label::kNear); - __ movq(left_reg, right_reg); - } else { - Operand right_op = ToOperand(right); -- __ cmpq(left_reg, right_op); -+ __ cmpl(left_reg, right_op); - __ j(condition, &return_left, Label::kNear); - __ movq(left_reg, right_op); - } -diff --git a/test/mjsunit/regress/regress-164442.js b/test/mjsunit/regress/regress-164442.js -new file mode 100644 -index 0000000..1160d87 ---- /dev/null -+++ b/test/mjsunit/regress/regress-164442.js -@@ -0,0 +1,45 @@ -+// Copyright 2012 the V8 project authors. All rights reserved. -+// Redistribution and use in source and binary forms, with or without -+// modification, are permitted provided that the following conditions are -+// met: -+// -+// * Redistributions of source code must retain the above copyright -+// notice, this list of conditions and the following disclaimer. -+// * Redistributions in binary form must reproduce the above -+// copyright notice, this list of conditions and the following -+// disclaimer in the documentation and/or other materials provided -+// with the distribution. -+// * Neither the name of Google Inc. nor the names of its -+// contributors may be used to endorse or promote products derived -+// from this software without specific prior written permission. -+// -+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
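
Before the test body, a host-side demonstration of the sign bug described in the message above, with plain C++ integers standing in for register values:

    #include <stdint.h>
    #include <stdio.h>

    int main() {
      // An untagged int32 sits in a 64-bit register with a zero upper half,
      // so -1 reads as 0x00000000FFFFFFFF when compared with cmpq.
      int64_t left = (uint32_t)0;    // untagged 0
      int64_t right = (uint32_t)-1;  // untagged -1
      printf("cmpq-style: 0 > -1 is %d\n", left > right);         // 0: wrong
      printf("cmpl-style: 0 > -1 is %d\n",
             (int32_t)left > (int32_t)right);                     // 1: right
      return 0;
    }
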
-+ -+// Flags: --allow-natives-syntax -+ -+// Should not take a very long time (n^2 algorithms are bad) -+ -+ -+function ensureNotNegative(x) { -+ return Math.max(0, x | 0); -+} -+ -+ -+ensureNotNegative(1); -+ensureNotNegative(2); -+ -+%OptimizeFunctionOnNextCall(ensureNotNegative); -+ -+var r = ensureNotNegative(-1); -+ -+assertEquals(0, r); --- -2.0.3 - diff --git a/v8-3.14.5.10-x64-compare-stubs.patch b/v8-3.14.5.10-x64-compare-stubs.patch deleted file mode 100644 index bf7c542..0000000 --- a/v8-3.14.5.10-x64-compare-stubs.patch +++ /dev/null @@ -1,116 +0,0 @@ -From a960d1707a0038bfa5546c669b5b63c35bdb75c5 Mon Sep 17 00:00:00 2001 -From: Fedor Indutny -Date: Fri, 2 May 2014 22:44:45 +0400 -Subject: [PATCH] deps: backport 23f2736a from v8 upstream - -Original text: - - Fix corner case in x64 compare stubs. - - BUG=v8:2416 - - Review URL: https://codereview.chromium.org/11413087 - -fix #7528 ---- - src/x64/code-stubs-x64.cc | 2 +- - test/mjsunit/regress/regress-2416.js | 75 ++++++++++++++++++++++++++++ - 2 files changed, 76 insertions(+), 1 deletion(-) - create mode 100644 test/mjsunit/regress/regress-2416.js - -diff --git a/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc -index f0f9c5d..9ad0167 100644 ---- a/src/x64/code-stubs-x64.cc -+++ b/src/x64/code-stubs-x64.cc -@@ -5580,7 +5580,7 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) { - __ subq(rdx, rax); - __ j(no_overflow, &done, Label::kNear); - // Correct sign of result in case of overflow. -- __ SmiNot(rdx, rdx); -+ __ not_(rdx); - __ bind(&done); - __ movq(rax, rdx); - } -diff --git a/test/mjsunit/regress/regress-2416.js b/deps/v8/test/mjsunit/regress/regress-2416.js -new file mode 100644 -index 0000000..02afeb9 ---- /dev/null -+++ b/test/mjsunit/regress/regress-2416.js -@@ -0,0 +1,75 @@ -+// Copyright 2012 the V8 project authors. All rights reserved. -+// Redistribution and use in source and binary forms, with or without -+// modification, are permitted provided that the following conditions are -+// met: -+// -+// * Redistributions of source code must retain the above copyright -+// notice, this list of conditions and the following disclaimer. -+// * Redistributions in binary form must reproduce the above -+// copyright notice, this list of conditions and the following -+// disclaimer in the documentation and/or other materials provided -+// with the distribution. -+// * Neither the name of Google Inc. nor the names of its -+// contributors may be used to endorse or promote products derived -+// from this software without specific prior written permission. -+// -+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
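
The GenerateSmis change earlier in this patch swaps SmiNot for a plain full-width not_: when the smi subtraction overflows, inverting all 64 bits yields a value with the correct sign, and since x64 smis keep their 32-bit payload in the upper half of the word, the inverted result can never collapse to zero by accident. A model of the trick using a compiler intrinsic (GCC/Clang __builtin_sub_overflow):

    #include <stdint.h>
    #include <stdio.h>

    // x64 smis carry the 32-bit payload in the upper half of the word.
    static int64_t ToSmi(int32_t v) {
      return (int64_t)((uint64_t)(uint32_t)v << 32);  // avoids signed-shift UB
    }

    static int Compare(int32_t a, int32_t b) {
      int64_t diff;
      if (__builtin_sub_overflow(ToSmi(a), ToSmi(b), &diff)) {
        diff = ~diff;  // overflow flipped the sign; NOT flips it back
      }
      return diff < 0 ? -1 : diff > 0 ? 1 : 0;
    }

    int main() {
      printf("%d\n", Compare(2147483647, -2147483647 - 1));  // 1
      printf("%d\n", Compare(-2147483647 - 1, 2147483647));  // -1
      printf("%d\n", Compare(42, 42));                       // 0
      return 0;
    }

This is exactly the corner case the regress-2416.js assertions below pin down: INT32_MAX compared against INT32_MIN in both directions.
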
-+ -+assertFalse(2147483647 < -2147483648) -+assertFalse(2147483647 <= -2147483648) -+assertFalse(2147483647 == -2147483648) -+assertTrue(2147483647 >= -2147483648) -+assertTrue(2147483647 > -2147483648) -+ -+assertTrue(-2147483648 < 2147483647) -+assertTrue(-2147483648 <= 2147483647) -+assertFalse(-2147483648 == 2147483647) -+assertFalse(-2147483648 >= 2147483647) -+assertFalse(-2147483648 > 2147483647) -+ -+assertFalse(2147483647 < 2147483647) -+assertTrue(2147483647 <= 2147483647) -+assertTrue(2147483647 == 2147483647) -+assertTrue(2147483647 >= 2147483647) -+assertFalse(2147483647 > 2147483647) -+ -+assertFalse(-2147483648 < -2147483648) -+assertTrue(-2147483648 <= -2147483648) -+assertTrue(-2147483648 == -2147483648) -+assertTrue(-2147483648 >= -2147483648) -+assertFalse(-2147483648 > -2147483648) -+ -+ -+assertFalse(1073741823 < -1073741824) -+assertFalse(1073741823 <= -1073741824) -+assertFalse(1073741823 == -1073741824) -+assertTrue(1073741823 >= -1073741824) -+assertTrue(1073741823 > -1073741824) -+ -+assertTrue(-1073741824 < 1073741823) -+assertTrue(-1073741824 <= 1073741823) -+assertFalse(-1073741824 == 1073741823) -+assertFalse(-1073741824 >= 1073741823) -+assertFalse(-1073741824 > 1073741823) -+ -+assertFalse(1073741823 < 1073741823) -+assertTrue(1073741823 <= 1073741823) -+assertTrue(1073741823 == 1073741823) -+assertTrue(1073741823 >= 1073741823) -+assertFalse(1073741823 > 1073741823) -+ -+assertFalse(-1073741824 < -1073741824) -+assertTrue(-1073741824 <= -1073741824) -+assertTrue(-1073741824 == -1073741824) -+assertTrue(-1073741824 >= -1073741824) -+assertFalse(-1073741824 > -1073741824) --- -1.9.3 diff --git a/v8-3.14.5.8-CVE-2013-2634.patch b/v8-3.14.5.8-CVE-2013-2634.patch deleted file mode 100644 index 6fdafce..0000000 --- a/v8-3.14.5.8-CVE-2013-2634.patch +++ /dev/null @@ -1,134 +0,0 @@ -From 5b1d2144ebd47ea768ca5b3cfcda830433c88efe Mon Sep 17 00:00:00 2001 -From: "T.C. Hollingsworth" -Date: Thu, 21 Mar 2013 17:34:19 -0700 -Subject: [PATCH] backport fix for CVE-2013-2632 from SVN r13964 - ---- - src/objects-inl.h | 3 ++- - src/objects.h | 7 +++++-- - src/parser.cc | 4 ++-- - src/parser.h | 5 ----- - src/stub-cache.cc | 8 ++++---- - 5 files changed, 13 insertions(+), 14 deletions(-) - -diff --git a/src/objects-inl.h b/src/objects-inl.h -index ea5a93f..4834fa6 100644 ---- a/src/objects-inl.h -+++ b/src/objects-inl.h -@@ -3500,8 +3500,9 @@ Code::Flags Code::ComputeFlags(Kind kind, - kind == CALL_IC || - kind == STORE_IC || - kind == KEYED_STORE_IC); -+ ASSERT(argc <= Code::kMaxArguments); - // Compute the bit mask. -- int bits = KindField::encode(kind) -+ unsigned int bits = KindField::encode(kind) - | ICStateField::encode(ic_state) - | TypeField::encode(type) - | ExtraICStateField::encode(extra_ic_state) -diff --git a/src/objects.h b/src/objects.h -index 755dd42..47d7757 100644 ---- a/src/objects.h -+++ b/src/objects.h -@@ -4180,8 +4180,8 @@ class Code: public HeapObject { - // FLAGS_MIN_VALUE and FLAGS_MAX_VALUE are specified to ensure that - // enumeration type has correct value range (see Issue 830 for more details). - enum Flags { -- FLAGS_MIN_VALUE = kMinInt, -- FLAGS_MAX_VALUE = kMaxInt -+ FLAGS_MIN_VALUE = 0, -+ FLAGS_MAX_VALUE = kMaxUInt32 - }; - - #define CODE_KIND_LIST(V) \ -@@ -4644,6 +4644,9 @@ class Code: public HeapObject { - // Signed field cannot be encoded using the BitField class. 
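
Code::kMaxArguments, defined just below, caps the argument count to what fits in the flag bits above kArgumentsCountShift, and the parser-side checks in this patch enforce that cap so an oversized count cannot alias other flag bits. A sketch of the packing, with kSmiValueSize assumed to be 31 (ia32-style smis):

    #include <cstdio>

    static const int kSmiValueSize = 31;  // assumption for this sketch
    static const int kArgumentsCountShift = 14;
    static const int kArgumentsBits = kSmiValueSize - kArgumentsCountShift + 1;
    static const int kMaxArguments = (1 << kArgumentsBits) - 1;

    static unsigned int EncodeFlags(unsigned int kind_bits, int argc) {
      return kind_bits | (static_cast<unsigned int>(argc)
                          << kArgumentsCountShift);
    }

    int main() {
      std::printf("kMaxArguments = %d\n", kMaxArguments);       // 262143
      unsigned int ok  = EncodeFlags(0x5u, kMaxArguments);
      unsigned int bad = EncodeFlags(0x5u, kMaxArguments + 1);  // wraps to 0
      std::printf("ok=%#x bad=%#x\n", ok, bad);  // bad silently drops argc
      return 0;
    }
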
- static const int kArgumentsCountShift = 14; - static const int kArgumentsCountMask = ~((1 << kArgumentsCountShift) - 1); -+ static const int kArgumentsBits = -+ PlatformSmiTagging::kSmiValueSize - Code::kArgumentsCountShift + 1; -+ static const int kMaxArguments = (1 << kArgumentsBits) - 1; - - // This constant should be encodable in an ARM instruction. - static const int kFlagsNotUsedInLookup = -diff --git a/src/parser.cc b/src/parser.cc -index 03e4b03..6da414a 100644 ---- a/src/parser.cc -+++ b/src/parser.cc -@@ -4243,7 +4243,7 @@ ZoneList* Parser::ParseArguments(bool* ok) { - while (!done) { - Expression* argument = ParseAssignmentExpression(true, CHECK_OK); - result->Add(argument, zone()); -- if (result->length() > kMaxNumFunctionParameters) { -+ if (result->length() > Code::kMaxArguments) { - ReportMessageAt(scanner().location(), "too_many_arguments", - Vector::empty()); - *ok = false; -@@ -4420,7 +4420,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle function_name, - - top_scope_->DeclareParameter(param_name, VAR); - num_parameters++; -- if (num_parameters > kMaxNumFunctionParameters) { -+ if (num_parameters > Code::kMaxArguments) { - ReportMessageAt(scanner().location(), "too_many_parameters", - Vector::empty()); - *ok = false; -diff --git a/src/parser.h b/src/parser.h -index 93fd1b8..e36a9b3 100644 ---- a/src/parser.h -+++ b/src/parser.h -@@ -449,11 +449,6 @@ class Parser { - Vector > args); - - private: -- // Limit on number of function parameters is chosen arbitrarily. -- // Code::Flags uses only the low 17 bits of num-parameters to -- // construct a hashable id, so if more than 2^17 are allowed, this -- // should be checked. -- static const int kMaxNumFunctionParameters = 32766; - static const int kMaxNumFunctionLocals = 131071; // 2^17-1 - - enum Mode { -diff --git a/src/stub-cache.cc b/src/stub-cache.cc -index 4119147..8490c7e 100644 ---- a/src/stub-cache.cc -+++ b/src/stub-cache.cc -@@ -617,7 +617,7 @@ Handle StubCache::ComputeCallConstant(int argc, - Handle code = - compiler.CompileCallConstant(object, holder, function, name, check); - code->set_check_type(check); -- ASSERT_EQ(flags, code->flags()); -+ ASSERT(flags == code->flags()); - PROFILE(isolate_, - CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name)); - GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code)); -@@ -655,7 +655,7 @@ Handle StubCache::ComputeCallField(int argc, - Handle code = - compiler.CompileCallField(Handle::cast(object), - holder, index, name); -- ASSERT_EQ(flags, code->flags()); -+ ASSERT(flags == code->flags()); - PROFILE(isolate_, - CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name)); - GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code)); -@@ -692,7 +692,7 @@ Handle StubCache::ComputeCallInterceptor(int argc, - Handle code = - compiler.CompileCallInterceptor(Handle::cast(object), - holder, name); -- ASSERT_EQ(flags, code->flags()); -+ ASSERT(flags == code->flags()); - PROFILE(isolate(), - CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name)); - GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code)); -@@ -721,7 +721,7 @@ Handle StubCache::ComputeCallGlobal(int argc, - CallStubCompiler compiler(isolate(), argc, kind, extra_state, cache_holder); - Handle code = - compiler.CompileCallGlobal(receiver, holder, cell, function, name); -- ASSERT_EQ(flags, code->flags()); -+ ASSERT(flags == code->flags()); - PROFILE(isolate(), - CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name)); - GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, 
*code)); --- -1.8.1.4 - diff --git a/v8-3.4.14-CVE-2014-3152.patch b/v8-3.4.14-CVE-2014-3152.patch deleted file mode 100644 index a399c79..0000000 --- a/v8-3.4.14-CVE-2014-3152.patch +++ /dev/null @@ -1,13 +0,0 @@ -diff -up v8-3.14.5.10/src/arm/lithium-codegen-arm.cc.cve20143152 v8-3.14.5.10/src/arm/lithium-codegen-arm.cc ---- v8-3.14.5.10/src/arm/lithium-codegen-arm.cc.cve20143152 2015-04-23 14:51:20.095648219 -0400 -+++ v8-3.14.5.10/src/arm/lithium-codegen-arm.cc 2015-04-23 14:53:28.834149299 -0400 -@@ -3034,7 +3034,8 @@ MemOperand LCodeGen::PrepareKeyedOperand - return MemOperand(base, scratch0(), LSL, shift_size); - } else { - ASSERT_EQ(-1, shift_size); -- return MemOperand(base, scratch0(), LSR, 1); -+ // key can be negative, so using ASR here. -+ return MemOperand(base, scratch0(), ASR, 1); - } - } - diff --git a/v8-314.spec b/v8-314.spec deleted file mode 100644 index c2328c8..0000000 --- a/v8-314.spec +++ /dev/null @@ -1,839 +0,0 @@ -# Hi Googlers! If you're looking in here for patches, nifty. -# You (and everyone else) are welcome to use any of my Chromium spec files and -# patches under the terms of the GPLv2 or later. -# You (and everyone else) are welcome to use any of my V8-specific spec files -# and patches under the terms of the BSD license. -# You (and everyone else) may NOT use my spec files or patches under any other -# terms. -# I hate to be a party-pooper here, but I really don't want to help Google -# make a proprietary browser. There are enough of those already. -# All copyrightable work in these spec files and patches is Copyright 2011 -# Tom Callaway - -# For the 1.2 branch, we use 0s here -# For 1.3+, we use the three digit versions -# Hey, now there are four digits. What do they mean? Popsicle. -%global somajor 3 -%global sominor 14 -%global sobuild 5 -%global sotiny 10 -%global sover %{somajor}.%{sominor}.%{sobuild} -%global truename v8 -# You don't really want to turn this on, because the "v8" package has this, and we'd -# conflict for no good reason. -%global with_python 0 - -Name: %{truename}-314 -Version: %{somajor}.%{sominor}.%{sobuild}.%{sotiny} -Release: 16%{?dist} -Summary: JavaScript Engine -License: BSD -URL: https://developers.google.com/v8/ -# Once found at http://commondatastorage.googleapis.com/chromium-browser-official/ -# Now, we're the canonical source for the tarball. 
:/ -Source0: v8-%{version}.tar.bz2 -ExclusiveArch: %{ix86} x86_64 %{arm} mips mipsel ppc ppc64 -BuildRequires: scons, readline-devel, libicu-devel -BuildRequires: valgrind-devel -BuildRequires: gcc, gcc-c++ - -#backport fix for CVE-2013-2634 (RHBZ#924495) -Patch1: v8-3.14.5.8-CVE-2013-2634.patch - -#backport fix for CVE-2013-2882 (RHBZ#991116) -Patch2: v8-3.14.5.10-CVE-2013-2882.patch - -#backport fix for CVE-2013-6640 (RHBZ#1039889) -Patch3: v8-3.14.5.10-CVE-2013-6640.patch - -#backport fix for enumeration for objects with lots of properties -# https://codereview.chromium.org/11362182 -Patch4: v8-3.14.5.10-enumeration.patch - -#backport fix for CVE-2013-6640 (RHBZ#1059070) -Patch5: v8-3.14.5.10-CVE-2013-6650.patch - -#backport only applicable fix for CVE-2014-1704 (RHBZ#1077136) -#the other two patches don't affect this version of v8 -Patch6: v8-3.14.5.10-CVE-2014-1704-1.patch - -# use clock_gettime() instead of gettimeofday(), which increases performance -# dramatically on virtual machines -# https://github.com/joyent/node/commit/f9ced08de30c37838756e8227bd091f80ad9cafa -# see above link or head of patch for complete rationale -Patch7: v8-3.14.5.10-use-clock_gettime.patch - -# fix corner case in x64 compare stubs -# fixes bug resulting in an incorrect result when comparing certain integers -# (e.g. 2147483647 > -2147483648 is false instead of true) -# https://code.google.com/p/v8/issues/detail?id=2416 -# https://github.com/joyent/node/issues/7528 -Patch8: v8-3.14.5.10-x64-compare-stubs.patch - -# backport security fix for memory corruption/stack overflow (RHBZ#1125464) -# https://groups.google.com/d/msg/nodejs/-siJEObdp10/2xcqqmTHiEMJ -# https://github.com/joyent/node/commit/530af9cb8e700e7596b3ec812bad123c9fa06356 -Patch9: v8-3.14.5.10-mem-corruption-stack-overflow.patch - -# backport bugfix for x64 MathMinMax: -# Fix x64 MathMinMax for negative untagged int32 arguments. -# An untagged int32 has zeros in the upper half even if it is negative. -# Using cmpq to compare such numbers will incorrectly ignore the sign. 
-# https://github.com/joyent/node/commit/3530fa9cd09f8db8101c4649cab03bcdf760c434 -Patch10: v8-3.14.5.10-x64-MathMinMax.patch - -# backport bugfix that eliminates unused-local-typedefs warning -# https://github.com/joyent/node/commit/53b4accb6e5747b156be91a2b90f42607e33a7cc -Patch11: v8-3.14.5.10-unused-local-typedefs.patch - -# backport security fix: Fix Hydrogen bounds check elimination -# resolves CVE-2013-6668 (RHBZ#1086120) -# https://github.com/joyent/node/commit/fd80a31e0697d6317ce8c2d289575399f4e06d21 -Patch12: v8-3.14.5.10-CVE-2013-6668.patch - -# backport fix to segfault caused by the above patch -# https://github.com/joyent/node/commit/3122e0eae64c5ab494b29d0a9cadef902d93f1f9 -Patch13: v8-3.14.5.10-CVE-2013-6668-segfault.patch - -# Use system valgrind header -# https://bugzilla.redhat.com/show_bug.cgi?id=1141483 -Patch14: v8-3.14.5.10-system-valgrind.patch - -# Fix issues with abort on uncaught exception -# https://github.com/joyent/node/pull/8666 -# https://github.com/joyent/node/issues/8631 -# https://github.com/joyent/node/issues/8630 -Patch15: v8-3.14.5.10-abort-uncaught-exception.patch - -# Fix unhandled ReferenceError in debug-debugger.js -# https://github.com/joyent/node/commit/0ff51c6e063e3eea9e4d9ea68edc82d935626fc7 -# https://codereview.chromium.org/741683002 -Patch16: v8-3.14.5.10-unhandled-ReferenceError.patch - -# Don't busy loop in CPU profiler thread -# https://github.com/joyent/node/pull/8789 -Patch17: v8-3.14.5.10-busy-loop.patch - -# Log V8 version in profiler log file -# (needed for compatibility with profiler tools) -# https://github.com/joyent/node/pull/9043 -# https://codereview.chromium.org/806143002 -Patch18: v8-3.14.5.10-profiler-log.patch - -# Fix CVE in ARM code -# https://bugzilla.redhat.com/show_bug.cgi?id=1101057 -# https://codereview.chromium.org/219473002 -Patch19: v8-3.4.14-CVE-2014-3152.patch - -# Add REPLACE_INVALID_UTF8 handling that nodejs needs -Patch20: v8-3.14.5.10-REPLACE_INVALID_UTF8.patch - -# mips support (from debian) -Patch21: 0002_mips.patch -Patch22: 0002_mips_r15102_backport.patch -Patch23: 0002_mips_r19121_backport.patch - -# Forced whole instruction cache flushing on Loongson (from debian) -Patch24: 0012_loongson_force_cache_flush.patch - -# ppc/ppc64 support (from Ubuntu, who got it from IBM) -# Rediffed from 0099_powerpc_support.patch -Patch25: v8-powerpc-support.patch - -# Fix for CVE-2016-1669 (thanks to bhoordhuis) -Patch26: v8-3.14.5.10-CVE-2016-1669.patch - -# Report builtins by name -# https://github.com/nodejs/node/commit/5a60e0d904c38c2bdb04785203b1b784967c870d -Patch27: v8-3.14.5.10-report-builtins-by-name.patch - -# Fix compile with gcc7 -# (thanks to Ben Noordhuis) -Patch28: v8-3.14.5.10-gcc7.patch - -# MOAR PPC -Patch29: v8-powerpc-support-SConstruct.patch - -# GCC8 HAPPY FUN TIME -Patch30: v8-3.14.5.10-gcc8.patch - -%description -V8 is Google's open source JavaScript engine. V8 is written in C++ and is used -in Google Chrome, the open source browser from Google. V8 implements ECMAScript -as specified in ECMA-262, 3rd edition. This is version 3.14, which is no longer -maintained by Google, but was adopted by a lot of other software. - -%package devel -Summary: Development headers and libraries for v8 -Requires: %{name}%{?_isa} = %{version}-%{release} - -%description devel -Development headers and libraries for v8 3.14. - -%if 0%{?with_python} -%package python -Summary: Python libraries from v8 -Requires: %{name}%{?_isa} = %{version}-%{release} - -%description python -Python libraries from v8. 
-%endif - -%prep -%setup -q -n %{truename}-%{version} -%patch1 -p1 -%patch2 -p1 -%patch3 -p1 -%patch4 -p1 -%patch5 -p1 -%patch6 -p1 -%patch7 -p1 -%patch8 -p1 -%patch9 -p1 -%patch10 -p1 -%patch11 -p1 -%patch12 -p1 -%patch13 -p1 -%patch14 -p1 -b .system-valgrind -%patch15 -p1 -b .abort-uncaught-exception -%patch16 -p1 -b .unhandled-ReferenceError -%patch17 -p1 -b .busy-loop -%patch18 -p1 -b .profiler-log -%patch19 -p1 -b .cve20143152 -%patch20 -p1 -b .riu -%patch21 -p1 -b .mips -%patch22 -p1 -b .r15102 -%patch23 -p1 -b .r19121 -%patch24 -p1 -b .loongson -%patch25 -p1 -b .ppc -%patch26 -p1 -b .CVE-2016-1669 -%patch27 -p1 -b .builtinname -%patch28 -p1 -b .gcc7 -%patch29 -p1 -b .ppc-harder -%patch30 -p1 -b .gcc8 - -# Do not need this lying about. -rm -rf src/third_party/valgrind - -#Patch7 needs -lrt on glibc < 2.17 (RHEL <= 6) -%if (0%{?rhel} > 6 || 0%{?fedora} > 18) -%global lrt %{nil} -%else -%global lrt -lrt -%endif - -# -fno-strict-aliasing is needed with gcc 4.4 to get past some ugly code -PARSED_OPT_FLAGS=`echo \'$RPM_OPT_FLAGS %{lrt} -fPIC -fno-strict-aliasing -Wno-unused-parameter -Wno-error=strict-overflow -Wno-unused-but-set-variable -Wno-error=cast-function-type -Wno-error=class-memaccess -fno-delete-null-pointer-checks\'| sed "s/ /',/g" | sed "s/',/', '/g"` -sed -i "s|'-O3',|$PARSED_OPT_FLAGS,|g" SConstruct - -# clear spurious executable bits -find . \( -name \*.cc -o -name \*.h -o -name \*.py \) -a -executable \ - |while read FILE ; do - echo $FILE - chmod -x $FILE - done - -%build -mkdir -p obj/release/ -export GCC_VERSION="44" - -# SCons is going away, but for now build with -# I_know_I_should_build_with_GYP=yes -scons library=shared snapshots=on \ -%ifarch x86_64 -arch=x64 \ -%endif -%ifarch ppc64 -arch=ppc64 \ -%endif -%ifarch ppc -arch=ppc \ -%endif -%ifarch armv7hl armv7hnl -armeabi=hard \ -%endif -%ifarch armv5tel armv6l armv7l -armeabi=soft \ -%endif -visibility=default \ -env=CCFLAGS:"-fPIC" \ -I_know_I_should_build_with_GYP=yes - -%if 0%{?fedora} >= 16 -export ICU_LINK_FLAGS=`pkg-config --libs-only-l icu-i18n` -%else -export ICU_LINK_FLAGS=`pkg-config --libs-only-l icu` -%endif - -# When will people learn to create versioned shared libraries by default? -# first, lets get rid of the old .so file -rm -rf libv8.so libv8preparser.so -# Now, lets make it right. -g++ $RPM_OPT_FLAGS -fPIC -o libv8preparser.so.%{sover} -shared -Wl,-soname,libv8preparser.so.%{somajor} \ - obj/release/allocation.os \ - obj/release/bignum.os \ - obj/release/bignum-dtoa.os \ - obj/release/cached-powers.os \ - obj/release/diy-fp.os \ - obj/release/dtoa.os \ - obj/release/fast-dtoa.os \ - obj/release/fixed-dtoa.os \ - obj/release/preparse-data.os \ - obj/release/preparser-api.os \ - obj/release/preparser.os \ - obj/release/scanner.os \ - obj/release/strtod.os \ - obj/release/token.os \ - obj/release/unicode.os \ - obj/release/utils.os - -# "obj/release/preparser-api.os" should not be included in the libv8.so file. 
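# (Note: that object is already linked into libv8preparser.so above; the
# export below filters it out of the wildcard list, presumably so the same
# code does not end up in both libraries.)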
-export RELEASE_BUILD_OBJS=`echo obj/release/*.os | sed 's|obj/release/preparser-api.os||g'` - -%ifarch %{arm} -g++ $RPM_OPT_FLAGS -fPIC -o libv8.so.%{sover} -shared -Wl,-soname,libv8.so.%{somajor} $RELEASE_BUILD_OBJS obj/release/extensions/*.os obj/release/arm/*.os $ICU_LINK_FLAGS -%endif -%ifarch %{ix86} -g++ $RPM_OPT_FLAGS -fPIC -o libv8.so.%{sover} -shared -Wl,-soname,libv8.so.%{somajor} $RELEASE_BUILD_OBJS obj/release/extensions/*.os obj/release/ia32/*.os $ICU_LINK_FLAGS -%endif -%ifarch x86_64 -g++ $RPM_OPT_FLAGS -fPIC -o libv8.so.%{sover} -shared -Wl,-soname,libv8.so.%{somajor} $RELEASE_BUILD_OBJS obj/release/extensions/*.os obj/release/x64/*.os $ICU_LINK_FLAGS -%endif -%ifarch mips -g++ $RPM_OPT_FLAGS -fPIC -o libv8.so.%{sover} -shared -Wl,-soname,libv8.so.%{somajor} $RELEASE_BUILD_OBJS obj/release/extensions/*.os obj/release/mips/*.os $ICU_LINK_FLAGS -%endif -%ifarch mipsel -g++ $RPM_OPT_FLAGS -fPIC -o libv8.so.%{sover} -shared -Wl,-soname,libv8.so.%{somajor} $RELEASE_BUILD_OBJS obj/release/extensions/*.os obj/release/mipsel/*.os $ICU_LINK_FLAGS -%endif -%ifarch ppc ppc64 -g++ $RPM_OPT_FLAGS -fPIC -o libv8.so.%{sover} -shared -Wl,-soname,libv8.so.%{somajor} $RELEASE_BUILD_OBJS obj/release/extensions/*.os obj/release/ppc/*.os $ICU_LINK_FLAGS -%endif - - -# We need to do this so d8 can link against it. -ln -sf libv8.so.%{sover} libv8.so -ln -sf libv8preparser.so.%{sover} libv8preparser.so - -# This will fail to link d8 because it doesn't use the icu libs. -# Don't build d8 shared. Stupid Google. Hate. -# SCons is going away, but for now build with -# I_know_I_should_build_with_GYP=yes -scons d8 \ -I_know_I_should_build_with_GYP=yes \ -%ifarch x86_64 -arch=x64 \ -%endif -%ifarch armv7hl armv7hnl -armeabi=hard \ -%endif -%ifarch armv5tel armv6l armv7l -armeabi=soft \ -%endif -%ifarch ppc64 -arch=ppc64 \ -%endif -%ifarch ppc -arch=ppc \ -%endif -snapshots=on console=readline visibility=default || : -# library=shared snapshots=on console=readline visibility=default || : - -# Sigh. I f*****g hate scons. -# But gyp is worse. -# rm -rf d8 - -# g++ $RPM_OPT_FLAGS -o d8 obj/release/d8.os -lreadline -lpthread -L. 
-lv8 $ICU_LINK_FLAGS - -%install -rm -rf %{buildroot} -mkdir -p %{buildroot}%{_includedir}/v8-3.14/ -mkdir -p %{buildroot}%{_libdir} -install -p include/*.h %{buildroot}%{_includedir}/v8-3.14/ -install -p libv8.so.%{sover} %{buildroot}%{_libdir} -install -p libv8preparser.so.%{sover} %{buildroot}%{_libdir} -mkdir -p %{buildroot}%{_bindir} -install -p -m0755 d8 %{buildroot}%{_bindir}/d8-314 - -pushd %{buildroot}%{_libdir} -ln -sf libv8.so.%{sover} libv8.so -ln -sf libv8.so.%{sover} libv8.so.%{somajor} -ln -sf libv8.so.%{sover} libv8.so.%{somajor}.%{sominor} -ln -sf libv8preparser.so.%{sover} libv8preparser.so -ln -sf libv8preparser.so.%{sover} libv8preparser.so.%{somajor} -ln -sf libv8preparser.so.%{sover} libv8preparser.so.%{somajor}.%{sominor} -popd - -chmod -x %{buildroot}%{_includedir}/v8-3.14/v8*.h - -mkdir -p %{buildroot}%{_includedir}/v8-3.14/v8/extensions/ -install -p src/extensions/*.h %{buildroot}%{_includedir}/v8-3.14/v8/extensions/ - -chmod -x %{buildroot}%{_includedir}/v8-3.14/v8/extensions/*.h - -%if 0%{?with_python} -# install Python JS minifier scripts for nodejs -install -d %{buildroot}%{python_sitelib} -sed -i 's|/usr/bin/python2.4|/usr/bin/env python|g' tools/jsmin.py -sed -i 's|/usr/bin/python2.4|/usr/bin/env python|g' tools/js2c.py -install -p -m0744 tools/jsmin.py %{buildroot}%{python_sitelib}/ -install -p -m0744 tools/js2c.py %{buildroot}%{python_sitelib}/ -chmod -R -x %{buildroot}%{python_sitelib}/*.py* -%endif - -%ldconfig_scriptlets - -%files -%doc AUTHORS ChangeLog -%license LICENSE -%{_bindir}/d8-314 -%{_libdir}/*.so.* - -%files devel -%{_includedir}/v8-3.14/ -%{_libdir}/*.so - -%if 0%{?with_python} -%files python -%{python2_sitelib}/j*.py* -%endif - -%changelog -* Sun Feb 17 2019 Igor Gnatenko - 3.14.5.10-16 -- Rebuild for readline 8.0 - -* Sun Feb 03 2019 Fedora Release Engineering - 3.14.5.10-15 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_30_Mass_Rebuild - -* Wed Jan 23 2019 Pete Walter - 3.14.5.10-14 -- Rebuild for ICU 63 - -* Tue Jul 24 2018 Tom Callaway - 3.14.5.10-13 -- add BuildRequires: gcc, gcc-c++ - -* Sat Jul 14 2018 Fedora Release Engineering - 3.14.5.10-12 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_29_Mass_Rebuild - -* Tue Jul 10 2018 Pete Walter - 3.14.5.10-11 -- Rebuild for ICU 62 - -* Fri May 18 2018 Tom Callaway - 3.14.5.10-10 -- fix build of this dinosaur with gcc8, mostly by telling the compiler to ignore all the nasty code errors - -* Fri Feb 09 2018 Fedora Release Engineering - 3.14.5.10-9 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_28_Mass_Rebuild - -* Thu Nov 30 2017 Pete Walter - 3.14.5.10-8 -- Rebuild for ICU 60.1 - -* Thu Aug 03 2017 Fedora Release Engineering - 3.14.5.10-7 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Binutils_Mass_Rebuild - -* Thu Jul 27 2017 Fedora Release Engineering - 3.14.5.10-6 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Mass_Rebuild - -* Tue Feb 28 2017 Tom Callaway - 3.14.5.10-5 -- fix FTBFS (thanks to Ben Noordhuis) - -* Sat Feb 11 2017 Fedora Release Engineering - 3.14.5.10-4 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_26_Mass_Rebuild - -* Mon Jul 25 2016 Tom Callaway - 3.14.5.10-3 -- drop epoch (new package, doesn't need it) - -* Wed Jul 6 2016 Tom Callaway - 1:3.14.5.10-2 -- apply fixes from nodejs for CVE-2016-1669 and reporting builtins by name - -* Tue Jun 7 2016 Tom Callaway - 1:3.14.5.10-1 -- make into v8-314 package - -* Mon Jun 06 2016 Vít Ondruch - 1:3.14.5.10-24 -- Use "-fno-delete-null-pointer-checks" to workaround GCC 6.x compatibility - 
(rhbz#1331480, rhbz#1331458). - -* Fri Feb 05 2016 Fedora Release Engineering - 1:3.14.5.10-23 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_24_Mass_Rebuild - -* Wed Oct 28 2015 David Tardon - 1:3.14.5.10-22 -- rebuild for ICU 56.1 - -* Mon Sep 21 2015 Tom Callaway - 1:3.14.5.10-21 -- add REPLACE_INVALID_UTF8 code needed for nodejs - -* Fri Jun 19 2015 Fedora Release Engineering - 1:3.14.5.10-20 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_23_Mass_Rebuild - -* Mon Jun 8 2015 Tom Callaway - 1:3.14.5.10-19 -- split off python subpackage (bz 959145) - -* Thu Apr 23 2015 Tom Callaway - 1:3.14.5.10-18 -- backport security fix for ARM - CVE-2014-3152 - -* Thu Feb 19 2015 T.C. Hollingsworth - 1:3.14.5.10-17 -- backports for nodejs 0.10.36 - -* Mon Jan 26 2015 David Tardon - 1:3.14.5.10-16 -- rebuild for ICU 54.1 - -* Tue Dec 2 2014 Tom Callaway - 1:3.14.5.10-15 -- use system valgrind header (bz1141483) - -* Wed Sep 17 2014 T.C. Hollingsworth - 1:3.14.5.10-14 -- backport bugfix that eliminates unused-local-typedefs warning -- backport security fix: Fix Hydrogen bounds check elimination (CVE-2013-6668; RHBZ#1086120) -- backport fix to segfault caused by the above patch - -* Tue Aug 26 2014 David Tardon - 1:3.14.5.10-13 -- rebuild for ICU 53.1 - -* Mon Aug 18 2014 Fedora Release Engineering - 1:3.14.5.10-12 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_22_Mass_Rebuild - -* Thu Jul 31 2014 T.C. Hollingsworth - 1:3.14.5.10-11 -- backport security fix for memory corruption and stack overflow (RHBZ#1125464) - https://groups.google.com/d/msg/nodejs/-siJEObdp10/2xcqqmTHiEMJ -- backport bug fix for x64 MathMinMax for negative untagged int32 arguments. - https://github.com/joyent/node/commit/3530fa9cd09f8db8101c4649cab03bcdf760c434 - -* Thu Jun 19 2014 T.C. Hollingsworth - 1:3.14.5.10-10 -- fix corner case in integer comparisons (v8 bug#2416; nodejs bug#7528) - -* Sun Jun 08 2014 Fedora Release Engineering - 1:3.14.5.10-9 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_Mass_Rebuild - -* Sat May 03 2014 T.C. Hollingsworth - 1:3.14.5.10-8 -- use clock_gettime() instead of gettimeofday(), which increases V8 performance - dramatically on virtual machines - -* Tue Mar 18 2014 T.C. Hollingsworth - 1:3.14.5.10-7 -- backport fix for unsigned integer arithmetic (RHBZ#1077136; CVE-2014-1704) - -* Mon Feb 24 2014 Tomas Hrcka - 1:3.14.5.10-6 -- Backport fix for incorrect handling of popular pages (RHBZ#1059070; CVE-2013-6640) - -* Fri Feb 14 2014 T.C. Hollingsworth - 1:3.14.5.10-5 -- rebuild for icu-52 - -* Mon Jan 27 2014 T.C. Hollingsworth - 1:3.14.5.10-4 -- backport fix for enumeration for objects with lots of properties - -* Fri Dec 13 2013 T.C. Hollingsworth - 1:3.14.5.10-3 -- backport fix for out-of-bounds read DoS (RHBZ#1039889; CVE-2013-6640) - -* Fri Aug 02 2013 T.C. Hollingsworth - 1:3.14.5.10-2 -- backport fix for remote DoS or unspecified other impact via type confusion - (RHBZ#991116; CVE-2013-2882) - -* Wed May 29 2013 T.C. Hollingsworth - 1:3.14.5.10-1 -- new upstream release 3.14.5.10 - -* Mon May 06 2013 Stanislav Ochotnicky - 1:3.14.5.8-2 -- Fix ownership of include directory (#958729) - -* Fri Mar 22 2013 T.C. Hollingsworth - 1:3.14.5.8-1 -- new upstream release 3.14.5.8 -- backport security fix for remote DoS via crafted javascript (RHBZ#924495; CVE-2013-2632) - -* Mon Mar 11 2013 Stephen Gallagher - 1:3.14.5.7-3 -- Update to v8 3.14.5.7 for Node.js 0.10.0 - -* Sat Jan 26 2013 T.C. 
Hollingsworth - 1:3.13.7.5-2 -- rebuild for icu-50 -- ignore new GCC 4.8 warning - -* Tue Dec 4 2012 Tom Callaway - 1:3.13.7.5-1 -- update to 3.13.7.5 (needed for chromium 23) -- Resolves multiple security issues (CVE-2012-5120, CVE-2012-5128) -- d8 is now using a static libv8, resolves bz 881973) - -* Sun Jul 22 2012 Fedora Release Engineering - 1:3.10.8-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_18_Mass_Rebuild - -* Fri Jul 6 2012 Tom Callaway 1:3.10.8-1 -- update to 3.10.8 (chromium 20) - -* Tue Jun 12 2012 Tom Callaway 1:3.9.24-1 -- update to 3.9.24 (chromium 19) - -* Mon Apr 23 2012 Thomas Spura 1:3.7.12.6 -- rebuild for icu-49 - -* Fri Mar 30 2012 Dennis Gilmore 1:3.7.12-5 -- make sure the right arm abi is used in the second call of scons - -* Thu Mar 29 2012 Dennis Gilmore 1:3.7.12-4 -- use correct arm macros -- use the correct abis for hard and soft float - -* Tue Mar 20 2012 Tom Callaway 3.7.12-3 -- merge changes from Fedora spec file, sync, add epoch - -* Fri Feb 17 2012 Tom Callaway 3.7.12-2 -- add -Wno-error=strict-overflow for gcc 4.7 (hack, hack, hack) - -* Mon Feb 13 2012 Tom Callaway 3.7.12-1 -- update to 3.7.12 - -* Thu Nov 3 2011 Tom Callaway 3.5.10-1 -- update to 3.5.10 - -* Mon Sep 26 2011 Tom Callaway 3.4.14-2 -- final 3.4.14 tag -- include JavaScript minifier scripts in -devel - -* Fri Jun 10 2011 Tom Callaway 3.2.10-1 -- tag 3.2.10 - -* Thu Apr 28 2011 Tom Callaway 3.1.8-1 -- "stable" v8 match for "stable" chromium (tag 3.1.8) - -* Tue Feb 22 2011 Tom Callaway 3.1.5-1.20110222svn6902 -- update to 3.1.5 -- enable experimental i18n icu stuff for chromium - -* Tue Jan 11 2011 Tom Callaway 3.0.7-1.20110111svn6276 -- update to 3.0.7 - -* Tue Dec 14 2010 Tom "spot" Callaway 3.0.0-2.20101209svn5957 -- fix sloppy code where NULL is used - -* Thu Dec 9 2010 Tom "spot" Callaway 3.0.0-1.20101209svn5957 -- update to 3.0.0 - -* Fri Oct 22 2010 Tom "spot" Callaway 2.5.1-1.20101022svn5692 -- update to 2.5.1 -- fix another fwrite with no return checking case - -* Thu Oct 14 2010 Tom "spot" Callaway 2.5.0-1.20101014svn5625 -- update to 2.5.0 - -* Mon Oct 4 2010 Tom "spot" Callaway 2.4.8-1.20101004svn5585 -- update to 2.4.8 - -* Tue Sep 14 2010 Tom "spot" Callaway 2.4.3-1.20100914svn5450 -- update to 2.4.3 - -* Tue Aug 31 2010 Tom "spot" Callaway 2.3.11-1.20100831svn5385 -- update to svn5385 - -* Fri Aug 27 2010 Tom "spot" Callaway 2.3.11-1.20100827svn5365 -- update to 2.3.11, svn5365 - -* Tue Aug 24 2010 Tom "spot" Callaway 2.3.10-1.20100824svn5332 -- update to 2.3.10, svn5332 - -* Wed Aug 18 2010 Tom "spot" Callaway 2.3.9-1.20100819svn5308 -- update to 2.3.9, svn5308 - -* Wed Aug 11 2010 Tom "spot" Callaway 2.3.7-1.20100812svn5251 -- update to svn5251 - -* Wed Aug 11 2010 Tom "spot" Callaway 2.3.7-1.20100811svn5248 -- update to 2.3.7, svn5248 - -* Tue Aug 10 2010 Tom "spot" Callaway 2.3.6-1.20100809svn5217 -- update to 2.3.6, svn5217 - -* Fri Aug 6 2010 Tom "spot" Callaway 2.3.5-1.20100806svn5198 -- update to 2.3.5, svn5198 - -* Mon Jul 26 2010 Tom "spot" Callaway 2.3.3-1.20100726svn5134 -- update to 2.3.3, svn5134 - -* Fri Jul 16 2010 Tom "spot" Callaway 2.3.0-1.20100716svn5088 -- update to 2.3.0, svn5088 - -* Tue Jul 6 2010 Tom "spot" Callaway 2.2.22-1.20100706svn5023 -- update to 2.2.22, svn5023 - -* Fri Jul 2 2010 Tom "spot" Callaway 2.2.21-1.20100702svn5010 -- update to svn5010 - -* Wed Jun 30 2010 Tom "spot" Callaway 2.2.21-1.20100630svn4993 -- update to 2.2.21, svn4993 -- include checkout script - -* Thu Jun 3 2010 Tom "spot" Callaway 2.2.14-1.20100603svn4792 -- 
update to 2.2.14, svn4792 - -* Tue Jun 1 2010 Tom "spot" Callaway 2.2.13-1.20100601svn4772 -- update to 2.2.13, svn4772 - -* Thu May 27 2010 Tom "spot" Callaway 2.2.12-1.20100527svn4747 -- update to 2.2.12, svn4747 - -* Tue May 25 2010 Tom "spot" Callaway 2.2.11-1.20100525svn4718 -- update to 2.2.11, svn4718 - -* Thu May 20 2010 Tom "spot" Callaway 2.2.10-1.20100520svn4684 -- update to svn4684 - -* Mon May 17 2010 Tom "spot" Callaway 2.2.10-1.20100517svn4664 -- update to 2.2.10, svn4664 - -* Thu May 13 2010 Tom "spot" Callaway 2.2.9-1.20100513svn4653 -- update to svn4653 - -* Mon May 10 2010 Tom "spot" Callaway 2.2.9-1.20100510svn4636 -- update to 2.2.9, svn4636 - -* Tue May 4 2010 Tom "spot" Callaway 2.2.7-1.20100504svn4581 -- update to 2.2.7, svn4581 - -* Mon Apr 19 2010 Tom "spot" Callaway 2.2.3-1.20100419svn4440 -- update to 2.2.3, svn4440 - -* Tue Apr 13 2010 Tom "spot" Callaway 2.2.2-1.20100413svn4397 -- update to 2.2.2, svn4397 - -* Thu Apr 8 2010 Tom "spot" Callaway 2.2.1-1.20100408svn4359 -- update to 2.2.1, svn4359 - -* Mon Mar 29 2010 Tom "spot" Callaway 2.2.0-1.20100329svn4309 -- update to 2.2.0, svn4309 - -* Thu Mar 25 2010 Tom "spot" Callaway 2.1.8-1.20100325svn4273 -- update to 2.1.8, svn4273 - -* Mon Mar 22 2010 Tom "spot" Callaway 2.1.5-1.20100322svn4204 -- update to 2.1.5, svn4204 - -* Mon Mar 15 2010 Tom "spot" Callaway 2.1.4-1.20100315svn4129 -- update to 2.1.4, svn4129 - -* Wed Mar 10 2010 Tom "spot" Callaway 2.1.0-1.20100310svn4088 -- update to 2.1.3, svn4088 - -* Thu Feb 18 2010 Tom "spot" Callaway 2.1.0-1.20100218svn3902 -- update to 2.1.0, svn3902 - -* Fri Jan 22 2010 Tom "spot" Callaway 2.0.6-1.20100122svn3681 -- update to 2.0.6, svn3681 - -* Tue Dec 29 2009 Tom "spot" Callaway 2.0.5-1.20091229svn3528 -- svn3528 - -* Mon Dec 21 2009 Tom "spot" Callaway 2.0.5-1.20091221svn3511 -- update to 2.0.5, svn3511 - -* Wed Dec 9 2009 Tom "spot" Callaway 2.0.3-1.20091209svn3443 -- update to 2.0.3, svn3443 - -* Tue Nov 24 2009 Tom "spot" Callaway 2.0.2-1.20091124svn3353 -- update to 2.0.2, svn3353 - -* Wed Nov 18 2009 Tom "spot" Callaway 2.0.0-1.20091118svn3334 -- update to 2.0.0, svn3334 - -* Tue Oct 27 2009 Tom "spot" Callaway 1.3.16-1.20091027svn3152 -- update to 1.3.16, svn3152 - -* Tue Oct 13 2009 Tom "spot" Callaway 1.3.15-1.20091013svn3058 -- update to svn3058 - -* Thu Oct 8 2009 Tom "spot" Callaway 1.3.15-1.20091008svn3036 -- update to 1.3.15, svn3036 - -* Tue Sep 29 2009 Tom "spot" Callaway 1.3.13-1.20090929svn2985 -- update to svn2985 -- drop unused parameter patch, figured out how to work around it with optflag mangling -- have I mentioned lately that scons is garbage? 
- -* Mon Sep 28 2009 Tom "spot" Callaway 1.3.13-1.20090928svn2980 -- update to 1.3.13, svn2980 - -* Wed Sep 16 2009 Tom "spot" Callaway 1.3.11-1.20090916svn2903 -- update to 1.3.11, svn2903 - -* Wed Sep 9 2009 Tom "spot" Callaway 1.3.9-1.20090909svn2862 -- update to 1.3.9, svn2862 - -* Thu Aug 27 2009 Tom "spot" Callaway 1.3.8-1.20090827svn2777 -- update to 1.3.8, svn2777 - -* Mon Aug 24 2009 Tom "spot" Callaway 1.3.6-1.20090824svn2747 -- update to 1.3.6, svn2747 - -* Tue Aug 18 2009 Tom "spot" Callaway 1.3.4-1.20090818svn2708 -- update to svn2708, build and package d8 - -* Fri Aug 14 2009 Tom "spot" Callaway 1.3.4-1.20090814svn2692 -- update to 1.3.4, svn2692 - -* Wed Aug 12 2009 Tom "spot" Callaway 1.3.3-1.20090812svn2669 -- update to 1.3.3, svn2669 - -* Mon Aug 10 2009 Tom "spot" Callaway 1.3.2-1.20090810svn2658 -- update to svn2658 - -* Fri Aug 7 2009 Tom "spot" Callaway 1.3.2-1.20090807svn2653 -- update to svn2653 - -* Wed Aug 5 2009 Tom "spot" Callaway 1.3.2-1.20090805svn2628 -- update to 1.3.2, svn2628 - -* Mon Aug 3 2009 Tom "spot" Callaway 1.3.1-1.20090803svn2607 -- update to svn2607 - -* Fri Jul 31 2009 Tom "spot" Callaway 1.3.1-1.20090731svn2602 -- update to svn2602 - -* Thu Jul 30 2009 Tom "spot" Callaway 1.3.1-1.20090730svn2592 -- update to 1.3.1, svn 2592 - -* Mon Jul 27 2009 Tom "spot" Callaway 1.3.0-1.20090727svn2543 -- update to 1.3.0, svn 2543 - -* Fri Jul 24 2009 Tom "spot" Callaway 1.2.14-1.20090724svn2534 -- update to svn2534 - -* Mon Jul 20 2009 Tom "spot" Callaway 1.2.14-1.20090720svn2510 -- update to svn2510 - -* Thu Jul 16 2009 Tom "spot" Callaway 1.2.14-1.20090716svn2488 -- update to svn2488 - -* Wed Jul 15 2009 Tom "spot" Callaway 1.2.14-1.20090715svn2477 -- update to 1.2.14, svn2477 - -* Mon Jul 13 2009 Tom "spot" Callaway 1.2.13-1.20090713svn2434 -- update to svn2434 - -* Sat Jul 11 2009 Tom "spot" Callaway 1.2.13-1.20090711svn2430 -- update to 1.2.13, svn2430 - -* Wed Jul 8 2009 Tom "spot" Callaway 1.2.12-1.20090708svn2391 -- update to 1.2.12, svn2391 - -* Sat Jul 4 2009 Tom "spot" Callaway 1.2.11-1.20090704svn2356 -- update to 1.2.11, svn2356 - -* Fri Jun 26 2009 Tom "spot" Callaway 1.2.9-1.20090626svn2284 -- update to svn2284 - -* Wed Jun 24 2009 Tom "spot" Callaway 1.2.9-1.20090624svn2262 -- update to 1.2.9, svn2262 - -* Thu Jun 18 2009 Tom "spot" Callaway 1.2.7-2.20090618svn2219 -- fix unused-parameter patch - -* Thu Jun 18 2009 Tom "spot" Callaway 1.2.7-1.20090618svn2219 -- update to 1.2.8, svn2219 - -* Mon Jun 8 2009 Tom "spot" Callaway 1.2.7-2.20090608svn2123 -- fix gcc44 compile for Fedora 11 - -* Mon Jun 8 2009 Tom "spot" Callaway 1.2.7-1.20090608svn2123 -- update to 1.2.7, svn2123 - -* Thu May 28 2009 Tom "spot" Callaway 1.2.5-1.20090528svn2072 -- update to newer svn checkout - -* Sun Feb 22 2009 Tom "spot" Callaway 1.0.1-1.20090222svn1332 -- update to newer svn checkout - -* Sun Sep 14 2008 Tom "spot" Callaway 0.2-2.20080914svn300 -- make a versioned shared library properly - -* Sun Sep 14 2008 Tom "spot" Callaway 0.2-1.20080914svn300 -- Initial package for Fedora - diff --git a/v8-powerpc-support-SConstruct.patch b/v8-powerpc-support-SConstruct.patch deleted file mode 100644 index b17a222..0000000 --- a/v8-powerpc-support-SConstruct.patch +++ /dev/null @@ -1,1640 +0,0 @@ -diff -up v8-3.14.5.10/SConstruct.ppc-harder v8-3.14.5.10/SConstruct ---- v8-3.14.5.10/SConstruct.ppc-harder 2012-10-22 09:09:53.000000000 -0400 -+++ v8-3.14.5.10/SConstruct 2017-03-01 12:47:36.529605806 -0500 -@@ -143,6 +143,12 @@ LIBRARY_FLAGS = { - 'CCFLAGS': ['-m32'], - 
'LINKFLAGS': ['-m32'] - }, -+ 'arch:ppc': { -+ 'CPPDEFINES': ['V8_TARGET_ARCH_PPC'], -+ }, -+ 'arch:ppc64': { -+ 'CPPDEFINES': ['V8_TARGET_ARCH_PPC64', 'V8_TARGET_ARCH_PPC'], -+ }, - 'arch:arm': { - 'CPPDEFINES': ['V8_TARGET_ARCH_ARM'], - 'unalignedaccesses:on' : { -@@ -994,7 +1000,7 @@ def GuessStrictAliasing(env): - - PLATFORM_OPTIONS = { - 'arch': { -- 'values': ['arm', 'ia32', 'x64', 'mips'], -+ 'values': ['arm', 'ia32', 'x64', 'mips', 'ppc64', 'ppc'], - 'guess': GuessArch, - 'help': 'the architecture to build for' - }, -diff -up v8-3.14.5.10/src/ppc/assembler-ppc.cc.ppc-harder v8-3.14.5.10/src/ppc/assembler-ppc.cc ---- v8-3.14.5.10/src/ppc/assembler-ppc.cc.ppc-harder 2017-03-01 12:47:36.471607257 -0500 -+++ v8-3.14.5.10/src/ppc/assembler-ppc.cc 2017-03-01 12:47:36.516606131 -0500 -@@ -72,7 +72,7 @@ static bool is_processor(const char* p) - - read_tried = true; - if (fd != -1) { --#if V8_TARGET_ARCH_PPC64 -+#if defined(V8_TARGET_ARCH_PPC64) - static Elf64_auxv_t buffer[16]; - Elf64_auxv_t *auxv_element; - #else -@@ -359,7 +359,7 @@ Register Assembler::GetRB(Instr instr) { - return reg; - } - --#if V8_TARGET_ARCH_PPC64 -+#if defined(V8_TARGET_ARCH_PPC64) - // This code assumes a FIXED_SEQUENCE for 64bit loads (lis/ori) - bool Assembler::Is64BitLoadIntoR12(Instr instr1, Instr instr2, - Instr instr3, Instr instr4, Instr instr5) { -@@ -392,7 +392,7 @@ bool Assembler::IsRlwinm(Instr instr) { - return ((instr & kOpcodeMask) == RLWINMX); - } - --#if V8_TARGET_ARCH_PPC64 -+#if defined(V8_TARGET_ARCH_PPC64) - bool Assembler::IsRldicl(Instr instr) { - return (((instr & kOpcodeMask) == EXT5) && - ((instr & kExt5OpcodeMask) == RLDICL)); -@@ -903,7 +903,7 @@ void Assembler::orx(Register dst, Regist - - void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) { - intptr_t imm16 = src2.imm_; --#if V8_TARGET_ARCH_PPC64 -+#if defined(V8_TARGET_ARCH_PPC64) - int L = 1; - #else - int L = 0; -@@ -916,7 +916,7 @@ void Assembler::cmpi(Register src1, cons - - void Assembler::cmpli(Register src1, const Operand& src2, CRegister cr) { - uintptr_t uimm16 = src2.imm_; --#if V8_TARGET_ARCH_PPC64 -+#if defined(V8_TARGET_ARCH_PPC64) - int L = 1; - #else - int L = 0; -@@ -928,7 +928,7 @@ void Assembler::cmpli(Register src1, con - } - - void Assembler::cmp(Register src1, Register src2, CRegister cr) { --#if V8_TARGET_ARCH_PPC64 -+#if defined(V8_TARGET_ARCH_PPC64) - int L = 1; - #else - int L = 0; -@@ -939,7 +939,7 @@ void Assembler::cmp(Register src1, Regis - } - - void Assembler::cmpl(Register src1, Register src2, CRegister cr) { --#if V8_TARGET_ARCH_PPC64 -+#if defined(V8_TARGET_ARCH_PPC64) - int L = 1; - #else - int L = 0; -@@ -1027,7 +1027,7 @@ void Assembler::lwzux(Register rt, const - } - - void Assembler::lwa(Register dst, const MemOperand &src) { --#if V8_TARGET_ARCH_PPC64 -+#if defined(V8_TARGET_ARCH_PPC64) - int offset = src.offset(); - ASSERT(!src.ra_.is(r0)); - ASSERT(!(offset & 3) && is_int16(offset)); -@@ -1116,7 +1116,7 @@ void Assembler::andc(Register dst, Regis - x_form(EXT2 | ANDCX, dst, src1, src2, rc); - } - --#if V8_TARGET_ARCH_PPC64 -+#if defined(V8_TARGET_ARCH_PPC64) - // 64bit specific instructions - void Assembler::ld(Register rd, const MemOperand &src) { - int offset = src.offset(); -@@ -1273,7 +1273,7 @@ void Assembler::marker_asm(int mcode) { - // TOC and static chain are ignored and set to 0. 
- void Assembler::function_descriptor() { - RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE); --#if V8_TARGET_ARCH_PPC64 -+#if defined(V8_TARGET_ARCH_PPC64) - uint64_t value = reinterpret_cast(pc_) + 3 * kPointerSize; - #if __BYTE_ORDER == __LITTLE_ENDIAN - emit(static_cast(value & 0xFFFFFFFF)); -@@ -1307,7 +1307,7 @@ void Assembler::mov(Register dst, const - RecordRelocInfo(src.rmode_, src.imm_); - } - --#if V8_TARGET_ARCH_PPC64 -+#if defined(V8_TARGET_ARCH_PPC64) - int64_t value = src.immediate(); - int32_t hi_32 = static_cast(value) >> 32; - int32_t lo_32 = static_cast(value); -@@ -1394,7 +1394,7 @@ void Assembler::info(const char* msg, Co - CRegister cr) { - if (::v8::internal::FLAG_trace_sim_stubs) { - emit(0x7d9ff808); --#if V8_TARGET_ARCH_PPC64 -+#if defined(V8_TARGET_ARCH_PPC64) - uint64_t value = reinterpret_cast(msg); - emit(static_cast(value >> 32)); - emit(static_cast(value & 0xFFFFFFFF)); -@@ -1759,7 +1759,7 @@ void Assembler::GrowBuffer() { - // buffer nor pc absolute pointing inside the code buffer, so there is no need - // to relocate any emitted relocation entries. - --#if ABI_USES_FUNCTION_DESCRIPTORS -+#if defined(ABI_USES_FUNCTION_DESCRIPTORS) - // Relocate runtime entries. - for (RelocIterator it(desc); !it.done(); it.next()) { - RelocInfo::Mode rmode = it.rinfo()->rmode(); -diff -up v8-3.14.5.10/src/ppc/assembler-ppc-inl.h.ppc-harder v8-3.14.5.10/src/ppc/assembler-ppc-inl.h -diff -up v8-3.14.5.10/src/ppc/builtins-ppc.cc.ppc-harder v8-3.14.5.10/src/ppc/builtins-ppc.cc ---- v8-3.14.5.10/src/ppc/builtins-ppc.cc.ppc-harder 2017-03-01 12:47:36.473607207 -0500 -+++ v8-3.14.5.10/src/ppc/builtins-ppc.cc 2017-03-01 12:47:36.516606131 -0500 -@@ -1412,7 +1412,7 @@ void Builtins::Generate_FunctionCall(Mac - __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset)); - __ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kCompilerHintsOffset)); - __ TestBit(r6, --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - SharedFunctionInfo::kStrictModeFunction, - #else - SharedFunctionInfo::kStrictModeFunction + kSmiTagSize, -@@ -1422,7 +1422,7 @@ void Builtins::Generate_FunctionCall(Mac - - // Do not transform the receiver for native (Compilerhints already in r6). - __ TestBit(r6, --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - SharedFunctionInfo::kNative, - #else - SharedFunctionInfo::kNative + kSmiTagSize, -@@ -1650,7 +1650,7 @@ void Builtins::Generate_FunctionApply(Ma - Label call_to_object, use_global_receiver; - __ lwz(r5, FieldMemOperand(r5, SharedFunctionInfo::kCompilerHintsOffset)); - __ TestBit(r5, --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - SharedFunctionInfo::kStrictModeFunction, - #else - SharedFunctionInfo::kStrictModeFunction + kSmiTagSize, -@@ -1660,7 +1660,7 @@ void Builtins::Generate_FunctionApply(Ma - - // Do not transform the receiver for strict mode functions. 
- __ TestBit(r5, --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - SharedFunctionInfo::kNative, - #else - SharedFunctionInfo::kNative + kSmiTagSize, -diff -up v8-3.14.5.10/src/ppc/codegen-ppc.cc.ppc-harder v8-3.14.5.10/src/ppc/codegen-ppc.cc ---- v8-3.14.5.10/src/ppc/codegen-ppc.cc.ppc-harder 2017-03-01 12:47:36.474607182 -0500 -+++ v8-3.14.5.10/src/ppc/codegen-ppc.cc 2017-03-01 12:47:36.516606131 -0500 -@@ -181,7 +181,7 @@ void ElementsTransitionGenerator::Genera - __ addi(r10, r9, Operand(FixedDoubleArray::kHeaderSize)); - __ SmiToDoubleArrayOffset(r9, r8); - __ add(r9, r10, r9); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - __ mov(r7, Operand(kHoleNanInt64)); - #else - __ mov(r7, Operand(kHoleNanLower32)); -@@ -236,7 +236,7 @@ void ElementsTransitionGenerator::Genera - __ CompareRoot(r22, Heap::kTheHoleValueRootIndex); - __ Assert(eq, "object found in smi-only array"); - } --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - __ std(r7, MemOperand(r10, 0)); - #else - #if __FLOAT_WORD_ORDER == __LITTLE_ENDIAN -@@ -330,7 +330,7 @@ void ElementsTransitionGenerator::Genera - // Non-hole double, copy value into a heap number. - __ AllocateHeapNumber(r5, r3, r4, r22, &gc_required); - // r5: new heap number --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - __ ld(r3, MemOperand(r7, -8)); - __ addi(r4, r5, Operand(-1)); // subtract tag for std - __ std(r3, MemOperand(r4, HeapNumber::kValueOffset)); -diff -up v8-3.14.5.10/src/ppc/code-stubs-ppc.cc.ppc-harder v8-3.14.5.10/src/ppc/code-stubs-ppc.cc ---- v8-3.14.5.10/src/ppc/code-stubs-ppc.cc.ppc-harder 2017-03-01 12:47:36.477607107 -0500 -+++ v8-3.14.5.10/src/ppc/code-stubs-ppc.cc 2017-03-01 12:47:36.517606106 -0500 -@@ -660,7 +660,7 @@ void FloatingPointHelper::ConvertIntToDo - __ subi(sp, sp, Operand(8)); // reserve one temporary double on the stack - - // sign-extend src to 64-bit and store it to temp double on the stack --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - __ extsw(r0, src); - __ std(r0, MemOperand(sp, 0)); - #else -@@ -692,7 +692,7 @@ void FloatingPointHelper::ConvertUnsigne - __ subi(sp, sp, Operand(8)); // reserve one temporary double on the stack - - // zero-extend src to 64-bit and store it to temp double on the stack --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - __ clrldi(r0, src, Operand(32)); - __ std(r0, MemOperand(sp, 0)); - #else -@@ -722,7 +722,7 @@ void FloatingPointHelper::ConvertIntToFl - __ subi(sp, sp, Operand(8)); // reserve one temporary double on the stack - - // sign-extend src to 64-bit and store it to temp double on the stack --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - __ extsw(int_scratch, src); - __ std(int_scratch, MemOperand(sp, 0)); - #else -@@ -1559,7 +1559,7 @@ void ToBooleanStub::Generate(MacroAssemb - __ lfd(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset)); - __ li(r0, Operand::Zero()); - __ push(r0); --#if !V8_TARGET_ARCH_PPC64 -+#if !defined(V8_TARGET_ARCH_PPC64) - __ push(r0); - #endif - __ lfd(d2, MemOperand(sp, 0)); -@@ -1847,7 +1847,7 @@ void UnaryOpStub::GenerateHeapNumberCode - // Do the bitwise operation and check if the result fits in a smi. - __ notx(r4, r4); - --#if !V8_TARGET_ARCH_PPC64 -+#if !defined(V8_TARGET_ARCH_PPC64) - Label try_float; - __ JumpIfNotSmiCandidate(r4, r5, &try_float); - #endif -@@ -1856,7 +1856,7 @@ void UnaryOpStub::GenerateHeapNumberCode - __ SmiTag(r3, r4); - __ Ret(); - --#if !V8_TARGET_ARCH_PPC64 -+#if !defined(V8_TARGET_ARCH_PPC64) - // Try to store the result in a heap number. 
- __ bind(&try_float); - if (mode_ == UNARY_NO_OVERWRITE) { -@@ -2073,7 +2073,7 @@ void BinaryOpStub::GenerateSmiSmiOperati - } - case Token::MUL: { - Label mul_zero, mul_neg_zero; --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - // Remove tag from both operands. - __ SmiUntag(ip, right); - __ SmiUntag(r0, left); -@@ -2102,7 +2102,7 @@ void BinaryOpStub::GenerateSmiSmiOperati - // Go slow on zero result to handle -0. - __ cmpi(scratch1, Operand::Zero()); - __ beq(&mul_zero); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - __ SmiTag(right, scratch1); - #else - __ mr(right, scratch1); -@@ -2160,7 +2160,7 @@ void BinaryOpStub::GenerateSmiSmiOperati - __ sub(scratch1, ip, scratch1, LeaveOE, SetRC); - // If the result is 0, we need to check for the -0 case. - __ beq(&check_neg_zero, cr0); --#if !V8_TARGET_ARCH_PPC64 -+#if !defined(V8_TARGET_ARCH_PPC64) - // Check that the signed result fits in a Smi. - __ JumpIfNotSmiCandidate(scratch1, scratch2, ¬_smi_result); - #endif -@@ -2212,7 +2212,7 @@ void BinaryOpStub::GenerateSmiSmiOperati - __ SmiUntag(scratch1, left); - __ GetLeastBitsFromSmi(scratch2, right, 5); - __ ShiftLeft(scratch1, scratch1, scratch2); --#if !V8_TARGET_ARCH_PPC64 -+#if !defined(V8_TARGET_ARCH_PPC64) - // Check that the signed result fits in a Smi. - __ JumpIfNotSmiCandidate(scratch1, scratch2, ¬_smi_result); - #endif -@@ -2358,7 +2358,7 @@ void BinaryOpStub::GenerateFPOperation(M - // The code below for writing into heap numbers isn't capable of - // writing the register as an unsigned int so we go to slow case if we - // hit this case. --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - const Condition cond = ne; - __ srw(r5, r6, r5); - __ TestSignBit32(r5, r0); -@@ -2378,7 +2378,7 @@ void BinaryOpStub::GenerateFPOperation(M - UNREACHABLE(); - } - --#if !V8_TARGET_ARCH_PPC64 -+#if !defined(V8_TARGET_ARCH_PPC64) - // Check that the *signed* result fits in a smi. - __ JumpIfNotSmiCandidate(r5, r6, &result_not_a_smi); - #endif -@@ -2631,7 +2631,7 @@ void BinaryOpStub::GenerateInt32Stub(Mac - &transition : &return_heap_number); - __ bne(not_int32); - --#if !V8_TARGET_ARCH_PPC64 -+#if !defined(V8_TARGET_ARCH_PPC64) - // Check if the result fits in a smi. - // If not try to return a heap number. - __ JumpIfNotSmiCandidate(scratch1, scratch2, &return_heap_number); -@@ -2643,7 +2643,7 @@ void BinaryOpStub::GenerateInt32Stub(Mac - - __ subi(sp, sp, Operand(8)); - __ stfd(d1, MemOperand(sp, 0)); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - __ ld(scratch2, MemOperand(sp, 0)); - #else - #if __FLOAT_WORD_ORDER == __LITTLE_ENDIAN -@@ -2743,7 +2743,7 @@ void BinaryOpStub::GenerateInt32Stub(Mac - // We only get a negative result if the shift value (r5) is 0. - // This result cannot be respresented as a signed 32-bit integer, try - // to return a heap number if we can. --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - const Condition cond = ne; - __ srw(r5, r6, r5); - __ TestSignBit32(r5, r0); -@@ -2764,7 +2764,7 @@ void BinaryOpStub::GenerateInt32Stub(Mac - UNREACHABLE(); - } - --#if !V8_TARGET_ARCH_PPC64 -+#if !defined(V8_TARGET_ARCH_PPC64) - // Check if the result fits in a smi. - // If not try to return a heap number. (We know the result is an int32.) - __ JumpIfNotSmiCandidate(r5, scratch1, &return_heap_number); -@@ -3084,7 +3084,7 @@ void TranscendentalCacheStub::Generate(M - char* elem_in1 = reinterpret_cast(&(test_elem[0].in[1])); - char* elem_out = reinterpret_cast(&(test_elem[0].output)); - // Two uint_32's and a pointer. 
--#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - CHECK_EQ(16, static_cast(elem2_start - elem_start)); - #else - CHECK_EQ(12, static_cast(elem2_start - elem_start)); -@@ -3095,7 +3095,7 @@ void TranscendentalCacheStub::Generate(M - } - #endif - --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - // Find the address of the r4'th entry in the cache, i.e., &r3[r4*16]. - __ ShiftLeftImm(scratch0, r4, Operand(4)); - #else -@@ -3598,12 +3598,12 @@ void CEntryStub::GenerateCore(MacroAssem - - __ mov(isolate_reg, Operand(ExternalReference::isolate_address())); - --#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR) -+#if defined(ABI_USES_FUNCTION_DESCRIPTORS) && !defined(USE_SIMULATOR) - // Native AIX/PPC64 Linux use a function descriptor. - __ LoadP(ToRegister(2), MemOperand(r15, kPointerSize)); // TOC - __ LoadP(ip, MemOperand(r15, 0)); // Instruction address - Register target = ip; --#elif ABI_TOC_ADDRESSABILITY_VIA_IP -+#elif defined(ABI_TOC_ADDRESSABILITY_VIA_IP) - Register target = ip; - __ Move(ip, r15); - #else -@@ -3814,7 +3814,7 @@ void JSEntryStub::GenerateBody(MacroAsse - Label invoke, handler_entry, exit; - - // Called from C --#if ABI_USES_FUNCTION_DESCRIPTORS -+#ifdef ABI_USES_FUNCTION_DESCRIPTORS - __ function_descriptor(); - #endif - -@@ -3993,7 +3993,7 @@ void InstanceofStub::Generate(MacroAssem - const Register scratch2 = r8; - Register scratch3 = no_reg; - --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - const int32_t kDeltaToLoadBoolResult = 9 * Assembler::kInstrSize; - #else - const int32_t kDeltaToLoadBoolResult = 5 * Assembler::kInstrSize; -@@ -4875,7 +4875,7 @@ void RegExpExecStub::Generate(MacroAssem - __ addi(code, code, Operand(Code::kHeaderSize - kHeapObjectTag)); - - --#if ABI_USES_FUNCTION_DESCRIPTORS && defined(USE_SIMULATOR) -+#if defined(ABI_USES_FUNCTION_DESCRIPTORS) && defined(USE_SIMULATOR) - // Even Simulated AIX/PPC64 Linux uses a function descriptor for the - // RegExp routine. Extract the instruction address here since - // DirectCEntryStub::GenerateCall will not do it for calls out to -@@ -6777,12 +6777,12 @@ void DirectCEntryStub::GenerateCall(Macr - void DirectCEntryStub::GenerateCall(MacroAssembler* masm, - Register target) { - Register scratch = r11; --#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR) -+#if defined(ABI_USES_FUNCTION_DESCRIPTORS) && !defined(USE_SIMULATOR) - Register dest = ip; - // Native AIX/PPC64 Linux use a function descriptor. 
- __ LoadP(ToRegister(2), MemOperand(target, kPointerSize)); // TOC - __ LoadP(ip, MemOperand(target, 0)); // Instruction address --#elif ABI_TOC_ADDRESSABILITY_VIA_IP -+#elif defined(ABI_TOC_ADDRESSABILITY_VIA_IP) - Register dest = ip; - __ Move(ip, target); - #else -@@ -7411,7 +7411,7 @@ void StoreArrayLiteralElementStub::Gener - __ LoadP(r8, FieldMemOperand(r4, JSObject::kElementsOffset)); - __ SmiToPtrArrayOffset(r9, r6); - __ add(r9, r8, r9); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - // add due to offset alignment requirements of StorePU - __ addi(r9, r9, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ StoreP(r3, MemOperand(r9)); -@@ -7485,11 +7485,11 @@ void ProfileEntryHookStub::Generate(Macr - __ mov(ip, Operand(reinterpret_cast(&entry_hook_))); - __ LoadP(ip, MemOperand(ip)); - --#if ABI_USES_FUNCTION_DESCRIPTORS -+#ifdef ABI_USES_FUNCTION_DESCRIPTORS - // Function descriptor - __ LoadP(ToRegister(2), MemOperand(ip, kPointerSize)); - __ LoadP(ip, MemOperand(ip, 0)); --#elif ABI_TOC_ADDRESSABILITY_VIA_IP -+#elif defined(ABI_TOC_ADDRESSABILITY_VIA_IP) - // ip already set. - #endif - -diff -up v8-3.14.5.10/src/ppc/deoptimizer-ppc.cc.ppc-harder v8-3.14.5.10/src/ppc/deoptimizer-ppc.cc ---- v8-3.14.5.10/src/ppc/deoptimizer-ppc.cc.ppc-harder 2017-03-01 12:47:36.480607032 -0500 -+++ v8-3.14.5.10/src/ppc/deoptimizer-ppc.cc 2017-03-01 12:47:36.518606081 -0500 -@@ -42,7 +42,7 @@ const int Deoptimizer::table_entry_size_ - - - int Deoptimizer::patch_size() { --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - const int kCallInstructionSizeInWords = 7; - #else - const int kCallInstructionSizeInWords = 4; -@@ -121,7 +121,7 @@ void Deoptimizer::DeoptimizeFunction(JSF - } - - --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - static const int32_t kBranchBeforeStackCheck = 0x409c0020; - static const int32_t kBranchBeforeInterrupt = 0x409c0044; - #else -@@ -154,7 +154,7 @@ void Deoptimizer::PatchStackCheckCodeAt( - ASSERT(Memory::int32_at(pc_after - 2 * kInstrSize) == 0x7d8803a6); - ASSERT(Memory::int32_at(pc_after - kInstrSize) == 0x4e800021); - --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - ASSERT(Assembler::Is64BitLoadIntoR12( - Assembler::instr_at(pc_after - 7 * kInstrSize), - Assembler::instr_at(pc_after - 6 * kInstrSize), -@@ -188,7 +188,7 @@ void Deoptimizer::PatchStackCheckCodeAt( - // 7d8803a6 mtlr r12 - // 4e800021 blrl - --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - CodePatcher patcher(pc_after - 8 * kInstrSize, 6); - - // Assemble the 64 bit value from the five part load and verify -@@ -222,7 +222,7 @@ void Deoptimizer::PatchStackCheckCodeAt( - patcher.masm()->mov(ip, - Operand(reinterpret_cast(replacement_code->entry()))); - --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch( - unoptimized_code, pc_after - 7 * kInstrSize, replacement_code); - #else -@@ -242,7 +242,7 @@ void Deoptimizer::RevertStackCheckCodeAt - ASSERT(Memory::int32_at(pc_after - 2 * kInstrSize) == 0x7d8803a6); - ASSERT(Memory::int32_at(pc_after - kInstrSize) == 0x4e800021); - --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - ASSERT(Assembler::Is64BitLoadIntoR12( - Assembler::instr_at(pc_after - 7 * kInstrSize), - Assembler::instr_at(pc_after - 6 * kInstrSize), -@@ -255,7 +255,7 @@ void Deoptimizer::RevertStackCheckCodeAt - Assembler::instr_at(pc_after - 3 * kInstrSize))); - #endif - --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - // Replace NOP with 
conditional jump. - CodePatcher patcher(pc_after - 8 * kInstrSize, 6); - if (FLAG_count_based_interrupts) { -@@ -285,7 +285,7 @@ void Deoptimizer::RevertStackCheckCodeAt - } - #endif - --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - // Assemble the 64 bit value from the five part load and verify - // that it is the stack guard code - uint64_t stack_check_address = -@@ -313,7 +313,7 @@ void Deoptimizer::RevertStackCheckCodeAt - patcher.masm()->mov(ip, - Operand(reinterpret_cast(check_code->entry()))); - --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch( - unoptimized_code, pc_after - 7 * kInstrSize, check_code); - #else -diff -up v8-3.14.5.10/src/ppc/disasm-ppc.cc.ppc-harder v8-3.14.5.10/src/ppc/disasm-ppc.cc ---- v8-3.14.5.10/src/ppc/disasm-ppc.cc.ppc-harder 2017-03-01 12:47:36.480607032 -0500 -+++ v8-3.14.5.10/src/ppc/disasm-ppc.cc 2017-03-01 12:47:36.518606081 -0500 -@@ -346,7 +346,7 @@ int Decoder::FormatOption(Instruction* i - return 2; - } - } --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - case 'd': { // ds value for offset - int32_t value = SIGN_EXT_IMM16(instr->Bits(15, 0) & ~3); - out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, -@@ -565,7 +565,7 @@ void Decoder::DecodeExt2(Instruction* in - Format(instr, "srw'. 'ra, 'rs, 'rb"); - return; - } --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - case SRDX: { - Format(instr, "srd'. 'ra, 'rs, 'rb"); - return; -@@ -575,7 +575,7 @@ void Decoder::DecodeExt2(Instruction* in - Format(instr, "sraw'. 'ra, 'rs, 'rb"); - return; - } --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - case SRAD: { - Format(instr, "srad'. 'ra, 'rs, 'rb"); - return; -@@ -589,7 +589,7 @@ void Decoder::DecodeExt2(Instruction* in - Format(instr, "extsh'. 'ra, 'rs"); - return; - } --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - case EXTSW: { - Format(instr, "extsw'. 'ra, 'rs"); - return; -@@ -650,7 +650,7 @@ void Decoder::DecodeExt2(Instruction* in - Format(instr, "slw'. 'ra, 'rs, 'rb"); - break; - } --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - case SLDX: { - Format(instr, "sld'. 'ra, 'rs, 'rb"); - break; -@@ -668,7 +668,7 @@ void Decoder::DecodeExt2(Instruction* in - Format(instr, "cntlzw'. 'ra, 'rs"); - break; - } --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - case CNTLZDX: { - Format(instr, "cntlzd'. 'ra, 'rs"); - break; -@@ -710,7 +710,7 @@ void Decoder::DecodeExt2(Instruction* in - Format(instr, "mullw'o'. 'rt, 'ra, 'rb"); - break; - } --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - case MULLD: { - Format(instr, "mulld'o'. 'rt, 'ra, 'rb"); - break; -@@ -720,7 +720,7 @@ void Decoder::DecodeExt2(Instruction* in - Format(instr, "divw'o'. 'rt, 'ra, 'rb"); - break; - } --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - case DIVD: { - Format(instr, "divd'o'. 
'rt, 'ra, 'rb"); - break; -@@ -814,7 +814,7 @@ void Decoder::DecodeExt2(Instruction* in - Format(instr, "lhzux 'rt, 'ra, 'rb"); - break; - } --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - case LDX: { - Format(instr, "ldx 'rt, 'ra, 'rb"); - break; -@@ -1210,7 +1210,7 @@ int Decoder::InstructionDecode(byte* ins - DecodeExt5(instr); - break; - } --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - case LD: { - switch (instr->Bits(1, 0)) { - case 0: -diff -up v8-3.14.5.10/src/ppc/full-codegen-ppc.cc.ppc-harder v8-3.14.5.10/src/ppc/full-codegen-ppc.cc ---- v8-3.14.5.10/src/ppc/full-codegen-ppc.cc.ppc-harder 2017-03-01 12:47:36.482606982 -0500 -+++ v8-3.14.5.10/src/ppc/full-codegen-ppc.cc 2017-03-01 12:47:36.518606081 -0500 -@@ -451,7 +451,7 @@ void FullCodeGenerator::EmitReturnSequen - masm_->mtlr(r0); - masm_->Add(sp, sp, (uint32_t)(sp_delta + (2 * kPointerSize)), r0); - masm_->blr(); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - // With 64bit we need a couple of nop() instructions to ensure we have - // enough space to SetDebugBreakAtReturn() - masm_->nop(); -@@ -1974,7 +1974,7 @@ void FullCodeGenerator::EmitInlineSmiBin - case Token::SHL: { - __ b(&stub_call); - __ GetLeastBitsFromSmi(scratch2, right, 5); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - __ ShiftLeft(right, left, scratch2); - #else - __ SmiUntag(scratch1, left); -@@ -2025,7 +2025,7 @@ void FullCodeGenerator::EmitInlineSmiBin - } - case Token::MUL: { - Label mul_zero; --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - // Remove tag from both operands. - __ SmiUntag(ip, right); - __ SmiUntag(r0, left); -@@ -2046,7 +2046,7 @@ void FullCodeGenerator::EmitInlineSmiBin - // Go slow on zero result to handle -0. - __ cmpi(scratch1, Operand::Zero()); - __ beq(&mul_zero); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - __ SmiTag(right, scratch1); - #else - __ mr(right, scratch1); -@@ -3695,7 +3695,7 @@ void FullCodeGenerator::EmitFastAsciiArr - // string_length to get the length of the result string. - __ LoadP(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset)); - __ sub(string_length, string_length, scratch1); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - __ SmiUntag(scratch1, scratch1); - __ Mul(scratch2, array_length, scratch1); - // Check for smi overflow. 
No overflow if higher 33 bits of 64-bit result are -diff -up v8-3.14.5.10/src/ppc/ic-ppc.cc.ppc-harder v8-3.14.5.10/src/ppc/ic-ppc.cc ---- v8-3.14.5.10/src/ppc/ic-ppc.cc.ppc-harder 2017-03-01 12:47:36.483606957 -0500 -+++ v8-3.14.5.10/src/ppc/ic-ppc.cc 2017-03-01 12:47:36.519606056 -0500 -@@ -1807,7 +1807,7 @@ void PatchInlinedSmiCode(Address address - patcher.masm()->TestIfSmi(reg, r0); - } else { - ASSERT(check == DISABLE_INLINED_SMI_CHECK); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - ASSERT(Assembler::IsRldicl(instr_at_patch)); - #else - ASSERT(Assembler::IsRlwinm(instr_at_patch)); -diff -up v8-3.14.5.10/src/ppc/lithium-codegen-ppc.cc.ppc-harder v8-3.14.5.10/src/ppc/lithium-codegen-ppc.cc ---- v8-3.14.5.10/src/ppc/lithium-codegen-ppc.cc.ppc-harder 2017-03-01 12:47:36.486606882 -0500 -+++ v8-3.14.5.10/src/ppc/lithium-codegen-ppc.cc 2017-03-01 12:47:36.519606056 -0500 -@@ -922,7 +922,7 @@ void LCodeGen::DoModI(LModI* instr) { - DeoptimizeIf(eq, instr->environment()); - } - --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - __ extsw(scratch, scratch); - #endif - __ Mul(scratch, divisor, scratch); -@@ -973,7 +973,7 @@ void LCodeGen::DoDivI(LDivI* instr) { - __ bind(&left_not_min_int); - } - --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - __ extsw(result, result); - #endif - -@@ -1049,7 +1049,7 @@ void LCodeGen::DoMathFloorOfDiv(LMathFlo - // The multiplier is a uint32. - ASSERT(multiplier > 0 && - multiplier < (static_cast(1) << 32)); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - __ extsw(scratch, dividend); - if (divisor < 0 && - instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { -@@ -1175,7 +1175,7 @@ void LCodeGen::DoMulI(LMulI* instr) { - - if (can_overflow) { - // scratch:result = left * right. 
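Nearly every hunk in this patch makes the same one-token change, replacing "#if V8_TARGET_ARCH_PPC64" with "#ifdef V8_TARGET_ARCH_PPC64". The two directives are not interchangeable: #if evaluates the macro's expansion as an integer expression (an undefined name counts as 0, and a macro defined with an empty body makes the directive ill-formed), while #ifdef tests only whether the name is defined at all. Presumably the switch makes these guards robust to build setups that define the macro with no value; the patch itself does not state a motivation, so that reading is an inference. A minimal sketch of the difference, using a hypothetical macro name that does not appear in the patch:

    // ifdef_vs_if.cc -- hypothetical example, not part of the patch
    #include <cstdio>

    #define PPC64_FLAG   // defined, but expands to nothing

    int main() {
    #ifdef PPC64_FLAG    // taken: only definedness is tested
      std::puts("ifdef: taken");
    #endif
      // "#if PPC64_FLAG" would not compile here: after expansion the
      // directive reads "#if" with no expression, a preprocessor error.
      return 0;
    }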
--#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - __ Mul(result, left, right); - __ TestIfInt32(result, scratch, r0); - DeoptimizeIf(ne, instr->environment()); -@@ -1269,14 +1269,14 @@ void LCodeGen::DoShiftI(LShiftI* instr) - switch (instr->op()) { - case Token::SAR: - __ sraw(result, left, scratch); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - __ extsw(result, result); - #endif - break; - case Token::SHR: - if (instr->can_deopt()) { - __ srw(result, left, scratch, SetRC); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - __ extsw(result, result, SetRC); - #endif - DeoptimizeIf(lt, instr->environment(), cr0); -@@ -1286,7 +1286,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) - break; - case Token::SHL: - __ slw(result, left, scratch); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - __ extsw(result, result); - #endif - break; -@@ -1302,7 +1302,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) - case Token::SAR: - if (shift_count != 0) { - __ srawi(result, left, shift_count); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - __ extsw(result, result); - #endif - } else { -@@ -1323,7 +1323,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) - case Token::SHL: - if (shift_count != 0) { - __ slwi(result, left, Operand(shift_count)); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - __ extsw(result, result); - #endif - } else { -@@ -1360,7 +1360,7 @@ void LCodeGen::DoSubI(LSubI* instr) { - right_reg, - scratch0(), r0); - // Doptimize on overflow --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - __ extsw(scratch0(), scratch0(), SetRC); - #endif - DeoptimizeIf(lt, instr->environment(), cr0); -@@ -1530,7 +1530,7 @@ void LCodeGen::DoAddI(LAddI* instr) { - ToRegister(left), - right_reg, - scratch0(), r0); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - __ extsw(scratch0(), scratch0(), SetRC); - #endif - // Doptimize on overflow -@@ -2359,7 +2359,7 @@ void LCodeGen::DoDeferredInstanceOfKnown - Register temp = ToRegister(instr->temp()); - ASSERT(temp.is(r7)); - __ LoadHeapObject(InstanceofStub::right(), instr->function()); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - static const int kAdditionalDelta = 13; - #else - static const int kAdditionalDelta = 7; -@@ -2887,7 +2887,7 @@ MemOperand LCodeGen::PrepareKeyedOperand - - if (additional_index) { - if (key_is_tagged) { --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - // more efficient to just untag - __ SmiUntag(scratch, key); - key_is_tagged = false; -@@ -2988,7 +2988,7 @@ void LCodeGen::DoLoadKeyedSpecializedArr - } else { - __ lwzx(result, mem_operand); - } --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - __ extsw(result, result); - #endif - break; -@@ -3096,7 +3096,7 @@ void LCodeGen::DoWrapReceiver(LWrapRecei - __ lwz(scratch, - FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset)); - __ TestBit(scratch, --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - SharedFunctionInfo::kStrictModeFunction, - #else - SharedFunctionInfo::kStrictModeFunction + kSmiTagSize, -@@ -3106,7 +3106,7 @@ void LCodeGen::DoWrapReceiver(LWrapRecei - - // Do not transform the receiver to object for builtins. 
- __ TestBit(scratch, --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - SharedFunctionInfo::kNative, - #else - SharedFunctionInfo::kNative + kSmiTagSize, -@@ -4402,7 +4402,7 @@ void LCodeGen::DoNumberTagI(LNumberTagI* - Register dst = ToRegister(instr->result()); - - DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - __ SmiTag(dst, src); - #else - __ SmiTagCheckOverflow(dst, src, r0); -diff -up v8-3.14.5.10/src/ppc/lithium-gap-resolver-ppc.cc.ppc-harder v8-3.14.5.10/src/ppc/lithium-gap-resolver-ppc.cc ---- v8-3.14.5.10/src/ppc/lithium-gap-resolver-ppc.cc.ppc-harder 2017-03-01 12:47:36.487606857 -0500 -+++ v8-3.14.5.10/src/ppc/lithium-gap-resolver-ppc.cc 2017-03-01 12:47:36.519606056 -0500 -@@ -280,7 +280,7 @@ void LGapResolver::EmitMove(int index) { - if (in_cycle_) { - // kSavedDoubleValueRegister was used to break the cycle, - // but kSavedValueRegister is free. --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - __ ld(kSavedValueRegister, source_operand); - __ std(kSavedValueRegister, destination_operand); - #else -diff -up v8-3.14.5.10/src/ppc/macro-assembler-ppc.cc.ppc-harder v8-3.14.5.10/src/ppc/macro-assembler-ppc.cc ---- v8-3.14.5.10/src/ppc/macro-assembler-ppc.cc.ppc-harder 2017-03-01 12:47:36.491606757 -0500 -+++ v8-3.14.5.10/src/ppc/macro-assembler-ppc.cc 2017-03-01 12:47:36.520606031 -0500 -@@ -132,7 +132,7 @@ int MacroAssembler::CallSize( - } - #else - --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - movSize = 5; - #else - movSize = 2; -@@ -160,7 +160,7 @@ int MacroAssembler::CallSizeNotPredictab - } - #else - --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - movSize = 5; - #else - movSize = 2; -@@ -196,7 +196,7 @@ void MacroAssembler::Call(Address target - mtlr(ip); - bclr(BA, SetLK); - --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - ASSERT(kCallTargetAddressOffset == 7 * kInstrSize); - #else - ASSERT(kCallTargetAddressOffset == 4 * kInstrSize); -@@ -1773,7 +1773,7 @@ void MacroAssembler::StoreNumberToDouble - Register scratch4, - Label* fail) { - Label smi_value, maybe_nan, have_double_value, is_nan, done; --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - Register double_reg = scratch2; - #else - Register mantissa_reg = scratch2; -@@ -1792,7 +1792,7 @@ void MacroAssembler::StoreNumberToDouble - - // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000 - // in the exponent. --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - mov(scratch1, Operand(kLastNonNaNInt64)); - addi(scratch3, value_reg, Operand(-kHeapObjectTag)); - ld(double_reg, MemOperand(scratch3, HeapNumber::kValueOffset)); -@@ -1804,14 +1804,14 @@ void MacroAssembler::StoreNumberToDouble - #endif - bge(&maybe_nan); - --#if !V8_TARGET_ARCH_PPC64 -+#if !defined(V8_TARGET_ARCH_PPC64) - lwz(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); - #endif - - bind(&have_double_value); - SmiToDoubleArrayOffset(scratch1, key_reg); - add(scratch1, elements_reg, scratch1); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - addi(scratch1, scratch1, Operand(-kHeapObjectTag)); - std(double_reg, MemOperand(scratch1, FixedDoubleArray::kHeaderSize)); - #else -@@ -1831,7 +1831,7 @@ void MacroAssembler::StoreNumberToDouble - // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise - // it's an Infinity, and the non-NaN code path applies. 
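The comment directly above leans on the IEEE-754 double layout: a value whose 11 exponent bits are all ones is a NaN when the 52-bit fraction is non-zero and an Infinity when the fraction is zero, which is why the code only needs to inspect the fraction once the exponent test has passed. A self-contained illustration of that bit test; the helper name is invented for this sketch and does not appear in the patch:

    // nan_bits.cc -- hypothetical example, not part of the patch
    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <limits>

    // NaN iff exponent bits 62..52 are all set and the fraction is non-zero;
    // an all-ones exponent with a zero fraction is +/-Infinity.
    static bool IsNaNBits(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      return ((bits >> 52) & 0x7ff) == 0x7ff &&
             (bits & 0x000fffffffffffffULL) != 0;
    }

    int main() {
      std::printf("%d %d\n",
                  IsNaNBits(std::numeric_limits<double>::quiet_NaN()),  // 1
                  IsNaNBits(std::numeric_limits<double>::infinity()));  // 0
      return 0;
    }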
- bgt(&is_nan); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - clrldi(r0, double_reg, Operand(32), SetRC); - beq(&have_double_value, cr0); - #else -@@ -1843,7 +1843,7 @@ void MacroAssembler::StoreNumberToDouble - // Load canonical NaN for storing into the double array. - uint64_t nan_int64 = BitCast( - FixedDoubleArray::canonical_not_the_hole_nan_as_double()); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - mov(double_reg, Operand(nan_int64)); - #else - mov(mantissa_reg, Operand(static_cast(nan_int64))); -@@ -2036,7 +2036,7 @@ void MacroAssembler::TryGetFunctionProto - lwz(scratch, - FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset)); - TestBit(scratch, --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - SharedFunctionInfo::kBoundFunction, - #else - SharedFunctionInfo::kBoundFunction + kSmiTagSize, -@@ -2122,7 +2122,7 @@ void MacroAssembler::CallApiFunctionAndR - addi(r29, r29, Operand(1)); - stw(r29, MemOperand(r26, kLevelOffset)); - --#if !ABI_RETURNS_HANDLES_IN_REGS -+#if !defined(ABI_RETURNS_HANDLES_IN_REGS) - // PPC LINUX ABI - // The return value is pointer-sized non-scalar value. - // Space has already been allocated on the stack which will pass as an -@@ -2136,7 +2136,7 @@ void MacroAssembler::CallApiFunctionAndR - DirectCEntryStub stub; - stub.GenerateCall(this, function); - --#if !ABI_RETURNS_HANDLES_IN_REGS -+#if !defined(ABI_RETURNS_HANDLES_IN_REGS) - // Retrieve return value from stack buffer - LoadP(r3, MemOperand(r3)); - #endif -@@ -2228,7 +2228,7 @@ void MacroAssembler::IndexFromHash(Regis - STATIC_ASSERT(String::kHashShift == 2); - STATIC_ASSERT(String::kArrayIndexValueBits == 24); - // index = SmiTag((hash >> 2) & 0x00FFFFFF); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - ExtractBitRange(index, hash, 25, 2); - SmiTag(index); - #else -@@ -2264,7 +2264,7 @@ void MacroAssembler::ConvertToInt32(Regi - - addi(sp, sp, Operand(-kDoubleSize)); - stfd(double_scratch, MemOperand(sp, 0)); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - ld(dest, MemOperand(sp, 0)); - #else - #if __FLOAT_WORD_ORDER == __LITTLE_ENDIAN -@@ -2279,7 +2279,7 @@ void MacroAssembler::ConvertToInt32(Regi - - // The result is not a 32-bit integer when the high 33 bits of the - // result are not identical. --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - TestIfInt32(dest, scratch, scratch2); - #else - TestIfInt32(scratch, dest, scratch2); -@@ -2305,7 +2305,7 @@ void MacroAssembler::EmitVFPTruncate(VFP - - addi(sp, sp, Operand(-kDoubleSize)); - stfd(double_scratch, MemOperand(sp, 0)); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - ld(result, MemOperand(sp, 0)); - #else - #if __FLOAT_WORD_ORDER == __LITTLE_ENDIAN -@@ -2320,7 +2320,7 @@ void MacroAssembler::EmitVFPTruncate(VFP - - // The result is a 32-bit integer when the high 33 bits of the - // result are identical. --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - TestIfInt32(result, scratch, r0); - #else - TestIfInt32(scratch, result, r0); -@@ -2439,7 +2439,7 @@ void MacroAssembler::EmitECMATruncate(Re - - // reserve a slot on the stack - stfdu(double_scratch, MemOperand(sp, -8)); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - ld(result, MemOperand(sp, 0)); - #else - #if __FLOAT_WORD_ORDER == __LITTLE_ENDIAN -@@ -2453,7 +2453,7 @@ void MacroAssembler::EmitECMATruncate(Re - - // The result is a 32-bit integer when the high 33 bits of the - // result are identical. 
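The "high 33 bits" wording that recurs in these conversion hunks is the standard sign-extension test: on the two's-complement targets this port supports, a 64-bit value is representable as a 32-bit signed integer exactly when bits 63 through 31 all agree, i.e. when sign-extending its low word reproduces the value. That is what the TestIfInt32 calls below check in registers. A standalone sketch; the helper name is invented here:

    // fits_int32.cc -- hypothetical example, not part of the patch
    #include <cstdint>
    #include <cstdio>

    // True iff bits 63..31 of v agree, i.e. v survives a round trip
    // through int32_t.
    static bool FitsInt32(int64_t v) {
      return v == static_cast<int64_t>(static_cast<int32_t>(v));
    }

    int main() {
      std::printf("%d %d\n",
                  FitsInt32(INT32_MAX),                             // 1
                  FitsInt32(static_cast<int64_t>(INT32_MAX) + 1));  // 0
      return 0;
    }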
--#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - TestIfInt32(result, scratch, r0); - #else - TestIfInt32(scratch, result, r0); -@@ -2484,7 +2484,7 @@ void MacroAssembler::EmitECMATruncate(Re - void MacroAssembler::GetLeastBitsFromSmi(Register dst, - Register src, - int num_least_bits) { --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - rldicl(dst, src, kBitsPerPointer - kSmiShift, - kBitsPerPointer - num_least_bits); - #else -@@ -2519,7 +2519,7 @@ void MacroAssembler::CallRuntime(const R - // smarter. - mov(r3, Operand(num_arguments)); - mov(r4, Operand(ExternalReference(f, isolate()))); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - CEntryStub stub(f->result_size); - #else - CEntryStub stub(1); -@@ -2859,7 +2859,7 @@ void MacroAssembler::JumpIfNotPowerOfTwo - bne(not_power_of_two, cr0); - } - --#if !V8_TARGET_ARCH_PPC64 -+#if !defined(V8_TARGET_ARCH_PPC64) - void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) { - ASSERT(!reg.is(overflow)); - mr(overflow, reg); // Save original value. -@@ -3141,7 +3141,7 @@ void MacroAssembler::CopyBytes(Register - stb(scratch, MemOperand(dst, 2)); - ShiftRightImm(scratch, scratch, Operand(8)); - stb(scratch, MemOperand(dst, 3)); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - ShiftRightImm(scratch, scratch, Operand(8)); - stb(scratch, MemOperand(dst, 4)); - ShiftRightImm(scratch, scratch, Operand(8)); -@@ -3152,7 +3152,7 @@ void MacroAssembler::CopyBytes(Register - stb(scratch, MemOperand(dst, 7)); - #endif - #else --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - stb(scratch, MemOperand(dst, 7)); - ShiftRightImm(scratch, scratch, Operand(8)); - stb(scratch, MemOperand(dst, 6)); -@@ -3356,13 +3356,13 @@ void MacroAssembler::CallCFunctionHelper - // Just call directly. The function called cannot cause a GC, or - // allow preemption, so the return address in the link register - // stays correct. --#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR) -+#if defined(ABI_USES_FUNCTION_DESCRIPTORS) && !defined(USE_SIMULATOR) - // AIX uses a function descriptor. When calling C code be aware - // of this descriptor and pick up values from it - Register dest = ip; - LoadP(ToRegister(2), MemOperand(function, kPointerSize)); - LoadP(dest, MemOperand(function, 0)); --#elif ABI_TOC_ADDRESSABILITY_VIA_IP -+#elif defined(ABI_TOC_ADDRESSABILITY_VIA_IP) - Register dest = ip; - Move(ip, function); - #else -@@ -3427,7 +3427,7 @@ void MacroAssembler::PatchRelocatedValue - } - - // insert new high word into lis instruction --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - srdi(ip, new_value, Operand(32)); - rlwimi(scratch, ip, 16, 16, 31); - #else -@@ -3446,14 +3446,14 @@ void MacroAssembler::PatchRelocatedValue - } - - // insert new low word into ori instruction --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - rlwimi(scratch, ip, 0, 16, 31); - #else - rlwimi(scratch, new_value, 0, 16, 31); - #endif - stw(scratch, MemOperand(lis_location, kInstrSize)); - --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - if (emit_debug_code()) { - lwz(scratch, MemOperand(lis_location, 2*kInstrSize)); - // scratch is now sldi. -@@ -3487,7 +3487,7 @@ void MacroAssembler::PatchRelocatedValue - #endif - - // Update the I-cache so the new lis and addic can be executed. 
--#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - FlushICache(lis_location, 5 * kInstrSize, scratch); - #else - FlushICache(lis_location, 2 * kInstrSize, scratch); -@@ -3519,7 +3519,7 @@ void MacroAssembler::GetRelocatedValueLo - // Copy the low 16bits from ori instruction into result - rlwimi(result, scratch, 0, 16, 31); - --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - if (emit_debug_code()) { - lwz(scratch, MemOperand(lis_location, 2*kInstrSize)); - // scratch is now sldi. -@@ -3698,7 +3698,7 @@ void MacroAssembler::EnsureNotWhite( - Register map = load_scratch; // Holds map while checking type. - Register length = load_scratch; // Holds length of object after testing type. - Label is_data_object, maybe_string_object, is_string_object, is_encoded; --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - Label length_computed; - #endif - -@@ -3744,11 +3744,11 @@ void MacroAssembler::EnsureNotWhite( - andi(r0, instance_type, Operand(kStringEncodingMask)); - beq(&is_encoded, cr0); - SmiUntag(ip); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - b(&length_computed); - #endif - bind(&is_encoded); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - SmiToShortArrayOffset(ip, ip); - bind(&length_computed); - #else -@@ -3932,7 +3932,7 @@ void MacroAssembler::LoadIntLiteral(Regi - - void MacroAssembler::LoadSmiLiteral(Register dst, Smi *smi) { - intptr_t value = reinterpret_cast(smi); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - ASSERT((value & 0xffffffff) == 0); - LoadIntLiteral(dst, value >> 32); - ShiftLeftImm(dst, dst, Operand(32)); -@@ -3949,7 +3949,7 @@ void MacroAssembler::LoadDoubleLiteral(D - // avoid gcc strict aliasing error using union cast - union { - double dval; --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - intptr_t ival; - #else - intptr_t ival[2]; -@@ -3957,7 +3957,7 @@ void MacroAssembler::LoadDoubleLiteral(D - } litVal; - - litVal.dval = value; --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - mov(scratch, Operand(litVal.ival)); - std(scratch, MemOperand(sp)); - #else -@@ -4053,7 +4053,7 @@ void MacroAssembler::Xor(Register ra, Re - - void MacroAssembler::CmpSmiLiteral(Register src1, Smi *smi, Register scratch, - CRegister cr) { --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - LoadSmiLiteral(scratch, smi); - cmp(src1, scratch, cr); - #else -@@ -4063,7 +4063,7 @@ void MacroAssembler::CmpSmiLiteral(Regis - - void MacroAssembler::CmplSmiLiteral(Register src1, Smi *smi, Register scratch, - CRegister cr) { --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - LoadSmiLiteral(scratch, smi); - cmpl(src1, scratch, cr); - #else -@@ -4073,7 +4073,7 @@ void MacroAssembler::CmplSmiLiteral(Regi - - void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi *smi, - Register scratch) { --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - LoadSmiLiteral(scratch, smi); - add(dst, src, scratch); - #else -@@ -4083,7 +4083,7 @@ void MacroAssembler::AddSmiLiteral(Regis - - void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi *smi, - Register scratch) { --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - LoadSmiLiteral(scratch, smi); - sub(dst, src, scratch); - #else -@@ -4093,7 +4093,7 @@ void MacroAssembler::SubSmiLiteral(Regis - - void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi *smi, - Register scratch, RCBit rc) { --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - LoadSmiLiteral(scratch, smi); - and_(dst, src, scratch, rc); - #else -@@ -4110,13 
+4110,13 @@ void MacroAssembler::LoadP(Register dst, - if (!scratch.is(no_reg) && !is_int16(offset)) { - /* cannot use d-form */ - LoadIntLiteral(scratch, offset); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - ldx(dst, MemOperand(mem.ra(), scratch)); - #else - lwzx(dst, MemOperand(mem.ra(), scratch)); - #endif - } else { --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - int misaligned = (offset & 3); - if (misaligned) { - // adjust base to conform to offset alignment requirements -@@ -4141,13 +4141,13 @@ void MacroAssembler::StoreP(Register src - if (!scratch.is(no_reg) && !is_int16(offset)) { - /* cannot use d-form */ - LoadIntLiteral(scratch, offset); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - stdx(src, MemOperand(mem.ra(), scratch)); - #else - stwx(src, MemOperand(mem.ra(), scratch)); - #endif - } else { --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - int misaligned = (offset & 3); - if (misaligned) { - // adjust base to conform to offset alignment requirements -@@ -4176,14 +4176,14 @@ void MacroAssembler::LoadWordArith(Regis - if (!scratch.is(no_reg) && !is_int16(offset)) { - /* cannot use d-form */ - LoadIntLiteral(scratch, offset); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - // lwax(dst, MemOperand(mem.ra(), scratch)); - ASSERT(0); // lwax not yet implemented - #else - lwzx(dst, MemOperand(mem.ra(), scratch)); - #endif - } else { --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - int misaligned = (offset & 3); - if (misaligned) { - // adjust base to conform to offset alignment requirements -diff -up v8-3.14.5.10/src/ppc/regexp-macro-assembler-ppc.cc.ppc-harder v8-3.14.5.10/src/ppc/regexp-macro-assembler-ppc.cc ---- v8-3.14.5.10/src/ppc/regexp-macro-assembler-ppc.cc.ppc-harder 2017-03-01 12:47:36.493606707 -0500 -+++ v8-3.14.5.10/src/ppc/regexp-macro-assembler-ppc.cc 2017-03-01 12:47:36.521606006 -0500 -@@ -139,7 +139,7 @@ RegExpMacroAssemblerPPC::RegExpMacroAsse - ASSERT_EQ(0, registers_to_save % 2); - - // Called from C --#if ABI_USES_FUNCTION_DESCRIPTORS -+#ifdef ABI_USES_FUNCTION_DESCRIPTORS - __ function_descriptor(); - #endif - -@@ -1434,7 +1434,7 @@ void RegExpCEntryStub::Generate(MacroAss - extra_stack_slots += kNumRequiredStackFrameSlots; - __ addi(sp, sp, Operand(-extra_stack_slots * kPointerSize)); - --#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR) -+#if defined(ABI_USES_FUNCTION_DESCRIPTORS) && !defined(USE_SIMULATOR) - // Native AIX/PPC64 Linux use a function descriptor. 
- __ LoadP(ToRegister(2), MemOperand(r26, kPointerSize)); // TOC - __ LoadP(ip, MemOperand(r26, 0)); // Instruction address -diff -up v8-3.14.5.10/src/ppc/simulator-ppc.cc.ppc-harder v8-3.14.5.10/src/ppc/simulator-ppc.cc ---- v8-3.14.5.10/src/ppc/simulator-ppc.cc.ppc-harder 2017-03-01 12:47:36.495606657 -0500 -+++ v8-3.14.5.10/src/ppc/simulator-ppc.cc 2017-03-01 12:47:36.521606006 -0500 -@@ -1332,7 +1332,7 @@ void Simulator::SoftwareInterrupt(Instru - PrintF("\n"); - } - CHECK(stack_aligned); --#if ABI_RETURNS_HANDLES_IN_REGS -+#ifdef ABI_RETURNS_HANDLES_IN_REGS - intptr_t p0 = arg0; - #else - intptr_t p0 = arg1; -@@ -1341,7 +1341,7 @@ void Simulator::SoftwareInterrupt(Instru - if (::v8::internal::FLAG_trace_sim) { - PrintF("Returned %p\n", reinterpret_cast(*result)); - } --#if ABI_RETURNS_HANDLES_IN_REGS -+#ifdef ABI_RETURNS_HANDLES_IN_REGS - arg0 = (intptr_t)*result; - #else - *(reinterpret_cast(arg0)) = (intptr_t) *result; -@@ -1363,21 +1363,21 @@ void Simulator::SoftwareInterrupt(Instru - PrintF("\n"); - } - CHECK(stack_aligned); --#if ABI_RETURNS_HANDLES_IN_REGS -+#ifdef ABI_RETURNS_HANDLES_IN_REGS - intptr_t p0 = arg0; - intptr_t p1 = arg1; - #else - intptr_t p0 = arg1; - intptr_t p1 = arg2; - #endif --#if !ABI_PASSES_HANDLES_IN_REGS -+#if !defined(ABI_PASSES_HANDLES_IN_REGS) - p0 = *(reinterpret_cast(p0)); - #endif - v8::Handle result = target(p0, p1); - if (::v8::internal::FLAG_trace_sim) { - PrintF("Returned %p\n", reinterpret_cast(*result)); - } --#if ABI_RETURNS_HANDLES_IN_REGS -+#ifdef ABI_RETURNS_HANDLES_IN_REGS - arg0 = (intptr_t)*result; - #else - *(reinterpret_cast(arg0)) = (intptr_t) *result; -@@ -1408,7 +1408,7 @@ void Simulator::SoftwareInterrupt(Instru - } - CHECK(stack_aligned); - int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - if (::v8::internal::FLAG_trace_sim) { - PrintF("Returned %08" V8PRIxPTR "\n", result); - } -@@ -1671,7 +1671,7 @@ bool Simulator::DecodeExt2_10bit(Instruc - } - break; - } --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - case SRDX: { - int rs = instr->RSValue(); - int ra = instr->RAValue(); -@@ -1699,7 +1699,7 @@ bool Simulator::DecodeExt2_10bit(Instruc - } - break; - } --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - case SRAD: { - int rs = instr->RSValue(); - int ra = instr->RAValue(); -@@ -1726,7 +1726,7 @@ bool Simulator::DecodeExt2_10bit(Instruc - } - break; - } --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - case EXTSW: { - const int shift = kBitsPerPointer - 32; - int ra = instr->RAValue(); -@@ -1980,7 +1980,7 @@ void Simulator::DecodeExt2_9bit(Instruct - } - break; - } --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - case SLDX: { - int rs = instr->RSValue(); - int ra = instr->RAValue(); -@@ -2017,7 +2017,7 @@ void Simulator::DecodeExt2_9bit(Instruct - } - break; - } --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - case CNTLZDX: { - int rs = instr->RSValue(); - int ra = instr->RAValue(); -@@ -2139,7 +2139,7 @@ void Simulator::DecodeExt2_9bit(Instruct - // todo - handle OE bit - break; - } --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - case MULLD: { - int rt = instr->RTValue(); - int ra = instr->RAValue(); -@@ -2170,7 +2170,7 @@ void Simulator::DecodeExt2_9bit(Instruct - // todo - handle OE bit - break; - } --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - case DIVD: { - int rt = instr->RTValue(); - int ra = instr->RAValue(); -@@ -2316,7 +2316,7 @@ void Simulator::DecodeExt2_9bit(Instruct - } - 
break; - } --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - case LDX: - case LDUX: { - int rt = instr->RTValue(); -@@ -2672,7 +2672,7 @@ void Simulator::DecodeExt4(Instruction* - UNIMPLEMENTED(); // Not used by V8. - } - --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - void Simulator::DecodeExt5(Instruction* instr) { - switch (instr->Bits(4, 2) << 2) { - case RLDICL: { -@@ -3195,7 +3195,7 @@ void Simulator::InstructionDecode(Instru - break; - } - --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - case EXT5: { - DecodeExt5(instr); - break; -@@ -3339,7 +3339,7 @@ intptr_t Simulator::Call(byte* entry, in - set_register(sp, entry_stack); - - // Prepare to execute the code at entry --#if ABI_USES_FUNCTION_DESCRIPTORS -+#ifdef ABI_USES_FUNCTION_DESCRIPTORS - // entry is the function descriptor - set_pc(*(reinterpret_cast(entry))); - #else -diff -up v8-3.14.5.10/src/ppc/stub-cache-ppc.cc.ppc-harder v8-3.14.5.10/src/ppc/stub-cache-ppc.cc ---- v8-3.14.5.10/src/ppc/stub-cache-ppc.cc.ppc-harder 2017-03-01 12:47:36.497606607 -0500 -+++ v8-3.14.5.10/src/ppc/stub-cache-ppc.cc 2017-03-01 12:47:36.521606006 -0500 -@@ -198,7 +198,7 @@ void StubCache::GenerateProbe(MacroAssem - Isolate* isolate = masm->isolate(); - Label miss; - --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - // Make sure that code is valid. The multiplying code relies on the - // entry size being 24. - ASSERT(sizeof(Entry) == 24); -@@ -239,7 +239,7 @@ void StubCache::GenerateProbe(MacroAssem - __ lwz(scratch, FieldMemOperand(name, String::kHashFieldOffset)); - __ LoadP(ip, FieldMemOperand(receiver, HeapObject::kMapOffset)); - __ add(scratch, scratch, ip); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - // Use only the low 32 bits of the map pointer. - __ rldicl(scratch, scratch, 0, 32); - #endif -@@ -728,7 +728,7 @@ static void GenerateFastApiDirectCall(Ma - // Prepare arguments. - __ addi(r5, sp, Operand(3 * kPointerSize)); - --#if !ABI_RETURNS_HANDLES_IN_REGS -+#if !defined(ABI_RETURNS_HANDLES_IN_REGS) - bool alloc_return_buf = true; - #else - bool alloc_return_buf = false; -@@ -1213,7 +1213,7 @@ void StubCompiler::GenerateLoadCallback( - Handle callback, - Handle name, - Label* miss) { --#if !ABI_RETURNS_HANDLES_IN_REGS -+#if !defined(ABI_RETURNS_HANDLES_IN_REGS) - bool alloc_return_buf = true; - #else - bool alloc_return_buf = false; -@@ -1263,7 +1263,7 @@ void StubCompiler::GenerateLoadCallback( - // If alloc_return_buf, we shift the arguments over a register - // (e.g. r3 -> r4) to allow for the return value buffer in implicit - // first arg. CallApiFunctionAndReturn will setup r3. --#if ABI_PASSES_HANDLES_IN_REGS -+#ifdef ABI_PASSES_HANDLES_IN_REGS - const int kAccessorInfoSlot = kStackFrameExtraParamSlot + - (alloc_return_buf ? 
2 : 1); - #else -@@ -1281,7 +1281,7 @@ void StubCompiler::GenerateLoadCallback( - FrameScope frame_scope(masm(), StackFrame::MANUAL); - __ EnterExitFrame(false, kApiStackSpace); - --#if !ABI_PASSES_HANDLES_IN_REGS -+#if !defined(ABI_PASSES_HANDLES_IN_REGS) - // pass 1st arg by reference - __ StoreP(arg0, MemOperand(sp, kArg0Slot * kPointerSize)); - __ addi(arg0, sp, Operand(kArg0Slot * kPointerSize)); -@@ -2155,7 +2155,7 @@ Handle CallStubCompiler::CompileMa - // The frim instruction is only supported on POWER5 - // and higher - __ frim(d1, d1); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - __ fctidz(d1, d1); - #else - __ fctiwz(d1, d1); -@@ -2166,7 +2166,7 @@ Handle CallStubCompiler::CompileMa - // perf benefit or if we can simply use the compatible sequence - // always - __ SetRoundingMode(kRoundToMinusInf); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - __ fctid(d1, d1); - #else - __ fctiw(d1, d1); -@@ -2175,7 +2175,7 @@ Handle CallStubCompiler::CompileMa - } - // Convert the argument to an integer. - __ stfdu(d1, MemOperand(sp, -8)); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - __ ld(r3, MemOperand(sp, 0)); - #else - #if __FLOAT_WORD_ORDER == __LITTLE_ENDIAN -@@ -3623,7 +3623,7 @@ static void GenerateSmiKeyCheck(MacroAss - double_scratch1, - kCheckForInexactConversion); - __ bne(fail); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - __ SmiTag(key, scratch0); - #else - __ SmiTagCheckOverflow(scratch1, scratch0, r0); -@@ -3692,7 +3692,7 @@ void KeyedLoadStubCompiler::GenerateLoad - case EXTERNAL_INT_ELEMENTS: - __ SmiToIntArrayOffset(value, key); - __ lwzx(value, MemOperand(r6, value)); --#if V8_TARGET_ARCH_PPC64 -+#ifdef V8_TARGET_ARCH_PPC64 - __ extsw(value, value); - #endif - break; -@@ -3731,7 +3731,7 @@ void KeyedLoadStubCompiler::GenerateLoad - // For the Int and UnsignedInt array types, we need to see whether - // the value can be represented in a Smi. If not, we need to convert - // it to a HeapNumber. --#if !V8_TARGET_ARCH_PPC64 -+#if !defined(V8_TARGET_ARCH_PPC64) - Label box_int; - // Check that the value fits in a smi. - __ JumpIfNotSmiCandidate(value, r0, &box_int); -@@ -3740,7 +3740,7 @@ void KeyedLoadStubCompiler::GenerateLoad - __ SmiTag(r3, value); - __ Ret(); - --#if !V8_TARGET_ARCH_PPC64 -+#if !defined(V8_TARGET_ARCH_PPC64) - __ bind(&box_int); - // Allocate a HeapNumber for the result and perform int-to-double - // conversion. 
Don't touch r3 or r4 as they are needed if allocation -diff -up v8-3.14.5.10/src/SConscript.ppc-harder v8-3.14.5.10/src/SConscript ---- v8-3.14.5.10/src/SConscript.ppc-harder 2012-07-24 03:59:48.000000000 -0400 -+++ v8-3.14.5.10/src/SConscript 2017-03-01 14:13:21.244294435 -0500 -@@ -204,6 +204,46 @@ SOURCES = { - ia32/regexp-macro-assembler-ia32.cc - ia32/stub-cache-ia32.cc - """), -+ 'arch:ppc': Split(""" -+ ppc/assembler-ppc.cc -+ ppc/builtins-ppc.cc -+ ppc/code-stubs-ppc.cc -+ ppc/codegen-ppc.cc -+ ppc/constants-ppc.cc -+ ppc/cpu-ppc.cc -+ ppc/debug-ppc.cc -+ ppc/deoptimizer-ppc.cc -+ ppc/disasm-ppc.cc -+ ppc/frames-ppc.cc -+ ppc/full-codegen-ppc.cc -+ ppc/ic-ppc.cc -+ ppc/lithium-codegen-ppc.cc -+ ppc/lithium-gap-resolver-ppc.cc -+ ppc/lithium-ppc.cc -+ ppc/macro-assembler-ppc.cc -+ ppc/regexp-macro-assembler-ppc.cc -+ ppc/stub-cache-ppc.cc -+ """), -+ 'arch:ppc64': Split(""" -+ ppc/assembler-ppc.cc -+ ppc/builtins-ppc.cc -+ ppc/code-stubs-ppc.cc -+ ppc/codegen-ppc.cc -+ ppc/constants-ppc.cc -+ ppc/cpu-ppc.cc -+ ppc/debug-ppc.cc -+ ppc/deoptimizer-ppc.cc -+ ppc/disasm-ppc.cc -+ ppc/frames-ppc.cc -+ ppc/full-codegen-ppc.cc -+ ppc/ic-ppc.cc -+ ppc/lithium-codegen-ppc.cc -+ ppc/lithium-gap-resolver-ppc.cc -+ ppc/lithium-ppc.cc -+ ppc/macro-assembler-ppc.cc -+ ppc/regexp-macro-assembler-ppc.cc -+ ppc/stub-cache-ppc.cc -+ """), - 'arch:x64': Split(""" - x64/assembler-x64.cc - x64/builtins-x64.cc -@@ -225,6 +265,8 @@ SOURCES = { - """), - 'simulator:arm': ['arm/simulator-arm.cc'], - 'simulator:mips': ['mips/simulator-mips.cc'], -+ 'simulator:ppc': ['ppc/simulator-ppc.cc'], -+ 'simulator:ppc64': ['ppc/simulator-ppc.cc'], - 'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'], - 'os:openbsd': ['platform-openbsd.cc', 'platform-posix.cc'], - 'os:linux': ['platform-linux.cc', 'platform-posix.cc'], diff --git a/v8-powerpc-support.patch b/v8-powerpc-support.patch deleted file mode 100644 index ed8f129..0000000 --- a/v8-powerpc-support.patch +++ /dev/null @@ -1,57926 +0,0 @@ -diff -up v8-3.14.5.10/aix_gyp.patch.ppc v8-3.14.5.10/aix_gyp.patch ---- v8-3.14.5.10/aix_gyp.patch.ppc 2016-06-07 14:15:45.971393122 -0400 -+++ v8-3.14.5.10/aix_gyp.patch 2016-06-07 14:15:45.971393122 -0400 -@@ -0,0 +1,62 @@ -+--- build/gyp/pylib/gyp/common.py -++++ build/gyp/pylib/gyp/common.py -+@@ -378,6 +378,8 @@ -+ return 'solaris' -+ if sys.platform.startswith('freebsd'): -+ return 'freebsd' -++ if sys.platform.startswith('aix'): -++ return 'aix' -+ -+ return 'linux' -+ -+--- build/gyp/pylib/gyp/generator/make.py -++++ build/gyp/pylib/gyp/generator/make.py -+@@ -200,6 +200,21 @@ -+ """ -+ -+ -++ LINK_COMMANDS_AIX = """\ -++ quiet_cmd_alink = AR($(TOOLSET)) $@ -++ cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) $(ARFLAGS.$(TOOLSET)) $@ $(filter %.o,$^) -++ -++ quiet_cmd_link = LINK($(TOOLSET)) $@ -++ cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS) -++ -++ quiet_cmd_solink = SOLINK($(TOOLSET)) $@ -++ cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS) -++ -++ quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@ -++ cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS) -++ """ -++ -++ -+ # Header of toplevel Makefile. -+ # This should go into the build tree, but it's easier to keep it here for now. 
-+ SHARED_HEADER = ("""\ -+ -+--- build/gyp/pylib/gyp/generator/make.py -++++ build/gyp/pylib/gyp/generator/make.py -+@@ -1933,6 +1948,10 @@ -+ cc_command=cc_host): -+ arflags_host = 'crsT' -+ -++ if flavor == 'aix': -++ arflags_target = '-Xany ' + arflags_target -++ arflags_host = '-Xany ' + arflags_host -++ -+ return { 'ARFLAGS.target': arflags_target, -+ 'ARFLAGS.host': arflags_host } -+ -+--- build/gyp/pylib/gyp/generator/make.py -++++ build/gyp/pylib/gyp/generator/make.py -+@@ -2026,6 +2045,10 @@ -+ elif flavor == 'freebsd': -+ header_params.update({ -+ 'flock': 'lockf', -++ }) -++ elif flavor == 'aix': -++ header_params.update({ -++ 'link_commands': LINK_COMMANDS_AIX, -+ }) -+ -+ header_params.update(RunSystemTests(flavor)) -diff -up v8-3.14.5.10/AUTHORS.ppc v8-3.14.5.10/AUTHORS ---- v8-3.14.5.10/AUTHORS.ppc 2013-02-07 07:26:07.000000000 -0500 -+++ v8-3.14.5.10/AUTHORS 2016-06-07 14:15:45.970393128 -0400 -@@ -7,6 +7,7 @@ Google Inc. - Sigma Designs Inc. - ARM Ltd. - Hewlett-Packard Development Company, LP -+IBM Corporation - Igalia, S.L. - Joyent, Inc. - -@@ -15,11 +16,13 @@ Alexander Botero-Lowry - Alexandre Vassalotti - Andreas Anyuru -+Andrew Low - Bert Belder - Burcu Dogan - Craig Schlenter - Daniel Andersson - Daniel James -+David Eelsohn - Derek J Conrod - Dineel D Sule - Erich Ocean -@@ -37,6 +40,7 @@ Kun Zhang - Martyn Capewell - Mathias Bynens - Matt Hanselman -+Matthew Brandyberry - Maxim Mossienko - Michael Lutz - Michael Smith -diff -up v8-3.14.5.10/build/common.gypi.ppc v8-3.14.5.10/build/common.gypi ---- v8-3.14.5.10/build/common.gypi.ppc 2016-06-07 14:15:45.958393199 -0400 -+++ v8-3.14.5.10/build/common.gypi 2016-06-07 14:17:26.697791944 -0400 -@@ -70,9 +70,13 @@ - - 'v8_enable_disassembler%': 0, - -+ 'v8_native_sim%': 'false', -+ - # Enable extra checks in API functions and other strategic places. 
- 'v8_enable_extra_checks%': 1, - -+ 'v8_enable_extra_ppcchecks%': 0, -+ - 'v8_enable_gdbjit%': 0, - - 'v8_object_print%': 0, -@@ -117,6 +121,9 @@ - ['v8_enable_extra_checks==1', { - 'defines': ['ENABLE_EXTRA_CHECKS',], - }], -+ ['v8_enable_extra_ppcchecks==1', { -+ 'defines': ['ENABLE_EXTRA_PPCCHECKS',], -+ }], - ['v8_enable_gdbjit==1', { - 'defines': ['ENABLE_GDB_JIT_INTERFACE',], - }], -@@ -129,6 +136,12 @@ - ['v8_interpreted_regexp==1', { - 'defines': ['V8_INTERPRETED_REGEXP',], - }], -+ ['v8_native_sim=="true"', { -+ 'defines': [ -+ 'NATIVE_SIMULATION', -+ 'USE_SIMULATOR', -+ ], -+ }], - ['v8_target_arch=="arm"', { - 'defines': [ - 'V8_TARGET_ARCH_ARM', -@@ -171,6 +184,17 @@ - }], - ], - }], # v8_target_arch=="arm" -+ ['v8_target_arch=="ppc"', { -+ 'defines': [ -+ 'V8_TARGET_ARCH_PPC', -+ ], -+ }], # v8_target_arch=="ppc" -+ ['v8_target_arch=="ppc64"', { -+ 'defines': [ -+ 'V8_TARGET_ARCH_PPC', -+ 'V8_TARGET_ARCH_PPC64', -+ ], -+ }], # v8_target_arch=="ppc64" - ['v8_target_arch=="ia32"', { - 'defines': [ - 'V8_TARGET_ARCH_IA32', -@@ -283,7 +307,7 @@ - }, - }], - ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \ -- or OS=="netbsd"', { -+ or OS=="netbsd" or OS=="aix"', { - 'conditions': [ - [ 'v8_no_strict_aliasing==1', { - 'cflags': [ '-fno-strict-aliasing' ], -@@ -296,7 +320,7 @@ - ['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \ - or OS=="netbsd" or OS=="mac" or OS=="android") and \ - (v8_target_arch=="arm" or v8_target_arch=="ia32" or \ -- v8_target_arch=="mipsel" or v8_target_arch=="mips")', { -+ v8_target_arch=="mipsel" or v8_target_arch=="mips" or v8_target_arch=="ppc")', { - # Check whether the host compiler and target compiler support the - # '-m32' option and set it if so. - 'target_conditions': [ -@@ -333,6 +357,20 @@ - ['OS=="netbsd"', { - 'cflags': [ '-I/usr/pkg/include' ], - }], -+ ['OS=="aix"', { -+ # AIX is missing /usr/include/endian.h -+ 'defines': [ -+ '__LITTLE_ENDIAN=1234', -+ '__BIG_ENDIAN=4321', -+ '__BYTE_ORDER=__BIG_ENDIAN', -+ '__FLOAT_WORD_ORDER=__BIG_ENDIAN'], -+ 'conditions': [ -+ [ 'v8_target_arch=="ppc64"', { -+ 'cflags': [ '-maix64' ], -+ 'ldflags': [ '-maix64' ], -+ }], -+ ], -+ }], - ], # conditions - 'configurations': { - 'Debug': { -@@ -360,10 +398,14 @@ - }, - }, - 'conditions': [ -- ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', { -+ ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" \ -+ or OS=="aix"', { - 'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter', - '-Wnon-virtual-dtor', '-Woverloaded-virtual' ], - }], -+ ['OS=="aix"', { -+ 'ldflags': [ '-Wl,-bbigtoc' ], -+ }], - ['OS=="android"', { - 'variables': { - 'android_full_debug%': 1, -@@ -383,7 +425,7 @@ - 'Release': { - 'conditions': [ - ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" \ -- or OS=="android"', { -+ or OS=="android" or OS=="aix"', { - 'cflags!': [ - '-O2', - '-Os', -diff -up v8-3.14.5.10/build/standalone.gypi.ppc v8-3.14.5.10/build/standalone.gypi ---- v8-3.14.5.10/build/standalone.gypi.ppc 2016-06-07 14:15:45.958393199 -0400 -+++ v8-3.14.5.10/build/standalone.gypi 2016-06-07 14:15:45.971393122 -0400 -@@ -31,7 +31,6 @@ - 'variables': { - 'library%': 'static_library', - 'component%': 'static_library', -- 'visibility%': 'hidden', - 'msvs_multi_core_compile%': '1', - 'mac_deployment_target%': '10.5', - 'variables': { -@@ -39,7 +38,7 @@ - 'variables': { - 'conditions': [ - ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or \ -- OS=="netbsd" or OS=="mac"', { -+ OS=="netbsd" or OS=="mac" or 
OS=="aix"', { - # This handles the Unix platforms we generally deal with. - # Anything else gets passed through, which probably won't work - # very well; such hosts should pass an explicit target_arch -@@ -49,7 +48,7 @@ - s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/;s/mips.*/mipsel/")', - }, { - # OS!="linux" and OS!="freebsd" and OS!="openbsd" and -- # OS!="netbsd" and OS!="mac" -+ # OS!="netbsd" and OS!="mac" and OS!="aix" - 'host_arch%': 'ia32', - }], - ], -@@ -75,6 +74,12 @@ - }, { - 'want_separate_host_toolset': 0, - }], -+ # -+ ['OS=="aix"', { -+ 'visibility%': '', -+ }, { -+ 'visibility%': 'hidden', -+ }], - ], - # Default ARM variable settings. - 'armv7%': 1, -@@ -86,12 +91,17 @@ - 'configurations': { - 'Debug': { - 'cflags': [ '-g', '-O0' ], -+ 'conditions': [ -+ [ 'OS=="aix"', { -+ 'cflags': [ '-gxcoff' ], -+ }], -+ ], - }, - }, - }, - 'conditions': [ - ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \ -- or OS=="netbsd"', { -+ or OS=="netbsd" or OS=="aix"', { - 'target_defaults': { - 'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter', - '-Wnon-virtual-dtor', '-pthread', '-fno-rtti', -@@ -101,6 +111,9 @@ - [ 'OS=="linux"', { - 'cflags': [ '-ansi' ], - }], -+ [ 'host_arch=="ppc64"', { -+ 'cflags': [ '-mminimal-toc' ], -+ }], - [ 'visibility=="hidden"', { - 'cflags': [ '-fvisibility=hidden' ], - }], -diff -up v8-3.14.5.10/.gitignore.ppc v8-3.14.5.10/.gitignore ---- v8-3.14.5.10/.gitignore.ppc 2012-10-22 09:09:53.000000000 -0400 -+++ v8-3.14.5.10/.gitignore 2016-06-07 14:15:45.970393128 -0400 -@@ -12,6 +12,7 @@ - *.sln - *.so - *.suo -+*.swp - *.user - *.vcproj - *.xcodeproj -diff -up v8-3.14.5.10/Makefile.ppc v8-3.14.5.10/Makefile ---- v8-3.14.5.10/Makefile.ppc 2016-06-07 14:15:45.958393199 -0400 -+++ v8-3.14.5.10/Makefile 2016-06-07 14:15:45.970393128 -0400 -@@ -73,6 +73,10 @@ endif - ifeq ($(extrachecks), off) - GYPFLAGS += -Dv8_enable_extra_checks=0 - endif -+# extrachecks=off -+ifeq ($(extrappcchecks), on) -+ GYPFLAGS += -Dv8_enable_extra_ppcchecks=1 -+endif - # gdbjit=on - ifeq ($(gdbjit), on) - GYPFLAGS += -Dv8_enable_gdbjit=1 -@@ -115,6 +119,10 @@ endif - ifeq ($(hardfp), on) - GYPFLAGS += -Dv8_use_arm_eabi_hardfloat=true - endif -+# nativesim=true -+ifeq ($(nativesim), true) -+ GYPFLAGS += -Dv8_native_sim=true -+endif - - # ----------------- available targets: -------------------- - # - "dependencies": pulls in external dependencies (currently: GYP) -@@ -133,8 +141,8 @@ endif - - # Architectures and modes to be compiled. Consider these to be internal - # variables, don't override them (use the targets instead). --ARCHES = ia32 x64 arm mipsel mips --DEFAULT_ARCHES = ia32 x64 arm -+ARCHES = ia32 x64 arm ppc mipsel mips ppc64 -+DEFAULT_ARCHES = ia32 x64 arm ppc ppc64 - MODES = release debug - ANDROID_ARCHES = android_ia32 android_arm - -diff -up v8-3.14.5.10/README.md.ppc v8-3.14.5.10/README.md ---- v8-3.14.5.10/README.md.ppc 2016-06-07 14:15:45.970393128 -0400 -+++ v8-3.14.5.10/README.md 2016-06-07 14:15:45.970393128 -0400 -@@ -0,0 +1,10 @@ -+v8ppc -+===== -+ -+Port of Google V8 javascript engine to PowerPC - PowerLinux and AIX. 
-+ -+This branch of the code (libv8-3.14) is intended to match the 3.14.5.8 -+level of V8 that is used by the libv8 library built as part of Ubuntu -+ -+http://packages.ubuntu.com/source/trusty/libv8-3.14 -+ -diff -up v8-3.14.5.10/src/assembler.cc.ppc v8-3.14.5.10/src/assembler.cc ---- v8-3.14.5.10/src/assembler.cc.ppc 2012-10-15 07:51:39.000000000 -0400 -+++ v8-3.14.5.10/src/assembler.cc 2016-06-07 14:15:45.971393122 -0400 -@@ -61,6 +61,8 @@ - #include "x64/assembler-x64-inl.h" - #elif V8_TARGET_ARCH_ARM - #include "arm/assembler-arm-inl.h" -+#elif V8_TARGET_ARCH_PPC -+#include "ppc/assembler-ppc-inl.h" - #elif V8_TARGET_ARCH_MIPS - #include "mips/assembler-mips-inl.h" - #else -@@ -75,6 +77,8 @@ - #include "x64/regexp-macro-assembler-x64.h" - #elif V8_TARGET_ARCH_ARM - #include "arm/regexp-macro-assembler-arm.h" -+#elif V8_TARGET_ARCH_PPC -+#include "ppc/regexp-macro-assembler-ppc.h" - #elif V8_TARGET_ARCH_MIPS - #include "mips/regexp-macro-assembler-mips.h" - #else // Unknown architecture. -@@ -1064,6 +1068,8 @@ ExternalReference ExternalReference::re_ - function = FUNCTION_ADDR(RegExpMacroAssemblerIA32::CheckStackGuardState); - #elif V8_TARGET_ARCH_ARM - function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState); -+#elif V8_TARGET_ARCH_PPC -+ function = FUNCTION_ADDR(RegExpMacroAssemblerPPC::CheckStackGuardState); - #elif V8_TARGET_ARCH_MIPS - function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState); - #else -@@ -1221,6 +1227,21 @@ double power_double_double(double x, dou - } - - if (x == 2.0) { -+ int y_int = static_cast(y); -+ if (y == y_int) return ldexp(1.0, y_int); -+ } -+#elif defined(_AIX) -+ // AIX has a custom implementation for pow. This handles certain -+ // special cases that are different. -+ if ((x == 0.0 || isinf(x)) && y != 0.0 && isfinite(y)) { -+ double f; -+ double result = ((x == 0.0) ^ (y > 0)) ? V8_INFINITY : 0; -+ /* retain sign if odd integer exponent */ -+ return ((modf(y, &f) == 0.0) && (static_cast(y) & 1)) ? 
-+ copysign(result, x) : result; -+ } -+ -+ if (x == 2.0) { - int y_int = static_cast(y); - if (y == y_int) return ldexp(1.0, y_int); - } -diff -up v8-3.14.5.10/src/assembler.h.ppc v8-3.14.5.10/src/assembler.h ---- v8-3.14.5.10/src/assembler.h.ppc 2012-10-22 09:09:53.000000000 -0400 -+++ v8-3.14.5.10/src/assembler.h 2016-06-07 14:15:45.971393122 -0400 -@@ -829,31 +829,33 @@ class PreservePositionScope BASE_EMBEDDE - // ----------------------------------------------------------------------------- - // Utility functions - --inline bool is_intn(int x, int n) { -- return -(1 << (n-1)) <= x && x < (1 << (n-1)); -+inline bool is_intn(intptr_t x, int n) { -+ return -(1L << (n-1)) <= x && x < (1L << (n-1)); - } - --inline bool is_int8(int x) { return is_intn(x, 8); } --inline bool is_int16(int x) { return is_intn(x, 16); } --inline bool is_int18(int x) { return is_intn(x, 18); } --inline bool is_int24(int x) { return is_intn(x, 24); } -+inline bool is_int8(intptr_t x) { return is_intn(x, 8); } -+inline bool is_int16(intptr_t x) { return is_intn(x, 16); } -+inline bool is_int18(intptr_t x) { return is_intn(x, 18); } -+inline bool is_int24(intptr_t x) { return is_intn(x, 24); } -+inline bool is_int26(intptr_t x) { return is_intn(x, 26); } - --inline bool is_uintn(int x, int n) { -- return (x & -(1 << n)) == 0; -+ -+inline bool is_uintn(intptr_t x, int n) { -+ return (x & -(1L << n)) == 0; - } - --inline bool is_uint2(int x) { return is_uintn(x, 2); } --inline bool is_uint3(int x) { return is_uintn(x, 3); } --inline bool is_uint4(int x) { return is_uintn(x, 4); } --inline bool is_uint5(int x) { return is_uintn(x, 5); } --inline bool is_uint6(int x) { return is_uintn(x, 6); } --inline bool is_uint8(int x) { return is_uintn(x, 8); } --inline bool is_uint10(int x) { return is_uintn(x, 10); } --inline bool is_uint12(int x) { return is_uintn(x, 12); } --inline bool is_uint16(int x) { return is_uintn(x, 16); } --inline bool is_uint24(int x) { return is_uintn(x, 24); } --inline bool is_uint26(int x) { return is_uintn(x, 26); } --inline bool is_uint28(int x) { return is_uintn(x, 28); } -+inline bool is_uint2(intptr_t x) { return is_uintn(x, 2); } -+inline bool is_uint3(intptr_t x) { return is_uintn(x, 3); } -+inline bool is_uint4(intptr_t x) { return is_uintn(x, 4); } -+inline bool is_uint5(intptr_t x) { return is_uintn(x, 5); } -+inline bool is_uint6(intptr_t x) { return is_uintn(x, 6); } -+inline bool is_uint8(intptr_t x) { return is_uintn(x, 8); } -+inline bool is_uint10(intptr_t x) { return is_uintn(x, 10); } -+inline bool is_uint12(intptr_t x) { return is_uintn(x, 12); } -+inline bool is_uint16(intptr_t x) { return is_uintn(x, 16); } -+inline bool is_uint24(intptr_t x) { return is_uintn(x, 24); } -+inline bool is_uint26(intptr_t x) { return is_uintn(x, 26); } -+inline bool is_uint28(intptr_t x) { return is_uintn(x, 28); } - - inline int NumberOfBitsSet(uint32_t x) { - unsigned int num_bits_set; -diff -up v8-3.14.5.10/src/atomicops.h.ppc v8-3.14.5.10/src/atomicops.h ---- v8-3.14.5.10/src/atomicops.h.ppc 2012-09-20 08:51:09.000000000 -0400 -+++ v8-3.14.5.10/src/atomicops.h 2016-06-07 14:15:45.971393122 -0400 -@@ -69,7 +69,8 @@ typedef intptr_t Atomic64; - - // Use AtomicWord for a machine-sized pointer. It will use the Atomic32 or - // Atomic64 routines below, depending on your architecture. 
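The assembler.h hunk just above widens the immediate-range helpers from int to intptr_t so that 64-bit operands can be range-checked on PPC64. The predicates themselves are small: x fits an n-bit signed field iff -(2^(n-1)) <= x < 2^(n-1), and an n-bit unsigned field iff no bit at position n or above is set. A self-contained restatement of the widened helpers, using 1L shifts as the patch does (which assumes long is at least pointer-sized, true on the LP64 targets this port cares about):

    // immediate_ranges.cc -- restates the patched helpers for illustration
    #include <cassert>
    #include <cstdint>

    inline bool is_intn(intptr_t x, int n) {
      return -(1L << (n - 1)) <= x && x < (1L << (n - 1));
    }

    inline bool is_uintn(intptr_t x, int n) {
      return (x & -(1L << n)) == 0;
    }

    int main() {
      assert(is_intn(-32768, 16) && !is_intn(32768, 16));  // 16-bit D-form offsets
      assert(is_uintn(65535, 16) && !is_uintn(-1, 16));
      return 0;
    }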
--#if defined(__OpenBSD__) && defined(__i386__) -+#if !defined(V8_HOST_ARCH_64_BIT) && \ -+ ((defined(__OpenBSD__) && defined(__i386__)) || defined(_AIX)) - typedef Atomic32 AtomicWord; - #else - typedef intptr_t AtomicWord; -@@ -162,6 +163,8 @@ Atomic64 Release_Load(volatile const Ato - #include "atomicops_internals_x86_gcc.h" - #elif defined(__GNUC__) && defined(V8_HOST_ARCH_ARM) - #include "atomicops_internals_arm_gcc.h" -+#elif defined(__GNUC__) && defined(V8_HOST_ARCH_PPC) -+#include "atomicops_internals_ppc_gcc.h" - #elif defined(__GNUC__) && defined(V8_HOST_ARCH_MIPS) - #include "atomicops_internals_mips_gcc.h" - #else -diff -up v8-3.14.5.10/src/atomicops_internals_ppc_gcc.h.ppc v8-3.14.5.10/src/atomicops_internals_ppc_gcc.h ---- v8-3.14.5.10/src/atomicops_internals_ppc_gcc.h.ppc 2016-06-07 14:15:45.971393122 -0400 -+++ v8-3.14.5.10/src/atomicops_internals_ppc_gcc.h 2016-06-07 14:15:45.971393122 -0400 -@@ -0,0 +1,167 @@ -+// Copyright 2010 the V8 project authors. All rights reserved. -+// Redistribution and use in source and binary forms, with or without -+// modification, are permitted provided that the following conditions are -+// met: -+// -+// * Redistributions of source code must retain the above copyright -+// notice, this list of conditions and the following disclaimer. -+// * Redistributions in binary form must reproduce the above -+// copyright notice, this list of conditions and the following -+// disclaimer in the documentation and/or other materials provided -+// with the distribution. -+// * Neither the name of Google Inc. nor the names of its -+// contributors may be used to endorse or promote products derived -+// from this software without specific prior written permission. -+// -+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ -+// This file is an internal atomic implementation, use atomicops.h instead. 
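The new atomicops_internals_ppc_gcc.h that begins here builds all of its operations on GCC's __sync builtins. The key primitive, __sync_val_compare_and_swap, returns the value it observed at *ptr, and the store happened only if that observed value equals the expected one; the compare-and-swap retry loops that follow are written against exactly that contract. A minimal usage sketch (GCC-specific, like the header itself; variable names are invented):

    // cas_demo.cc -- hypothetical example, not part of the patch
    #include <cstdio>

    int main() {
      volatile int cell = 1;
      // Succeeds: observes 1, stores 2.
      int seen1 = __sync_val_compare_and_swap(&cell, 1, 2);
      // Fails: observes 2, leaves cell unchanged.
      int seen2 = __sync_val_compare_and_swap(&cell, 1, 3);
      std::printf("%d %d %d\n", seen1, seen2, static_cast<int>(cell));  // 1 2 2
      return 0;
    }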
-+// -+ -+#ifndef V8_ATOMICOPS_INTERNALS_PPC_H_ -+#define V8_ATOMICOPS_INTERNALS_PPC_H_ -+ -+namespace v8 { -+namespace internal { -+ -+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, -+ Atomic32 old_value, -+ Atomic32 new_value) { -+ return(__sync_val_compare_and_swap( ptr, old_value, new_value)); -+} -+ -+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, -+ Atomic32 new_value) { -+ Atomic32 old_value; -+ do { -+ old_value = *ptr; -+ } while (__sync_bool_compare_and_swap(ptr, old_value, new_value)); -+ return old_value; -+} -+ -+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, -+ Atomic32 increment) { -+ return Barrier_AtomicIncrement(ptr, increment); -+} -+ -+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, -+ Atomic32 increment) { -+ for (;;) { -+ Atomic32 old_value = *ptr; -+ Atomic32 new_value = old_value + increment; -+ if (__sync_bool_compare_and_swap(ptr, old_value, new_value)) { -+ return new_value; -+ // The exchange took place as expected. -+ } -+ // Otherwise, *ptr changed mid-loop and we need to retry. -+ } -+} -+ -+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, -+ Atomic32 old_value, -+ Atomic32 new_value) { -+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value); -+} -+ -+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, -+ Atomic32 old_value, -+ Atomic32 new_value) { -+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value); -+} -+ -+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { -+ *ptr = value; -+} -+ -+inline void MemoryBarrier() { -+ __asm__ __volatile__("sync" : : : "memory"); -+} -+ -+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { -+ *ptr = value; -+ MemoryBarrier(); -+} -+ -+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { -+ MemoryBarrier(); -+ *ptr = value; -+} -+ -+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { -+ return *ptr; -+} -+ -+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { -+ Atomic32 value = *ptr; -+ MemoryBarrier(); -+ return value; -+} -+ -+inline Atomic32 Release_Load(volatile const Atomic32* ptr) { -+ MemoryBarrier(); -+ return *ptr; -+} -+ -+#ifdef V8_TARGET_ARCH_PPC64 -+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, -+ Atomic64 old_value, -+ Atomic64 new_value) { -+ return(__sync_val_compare_and_swap( ptr, old_value, new_value)); -+} -+ -+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, -+ Atomic64 old_value, -+ Atomic64 new_value) { -+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value); -+} -+ -+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, -+ Atomic64 old_value, -+ Atomic64 new_value) { -+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value); -+} -+ -+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { -+ *ptr = value; -+} -+ -+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { -+ *ptr = value; -+ MemoryBarrier(); -+} -+ -+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { -+ MemoryBarrier(); -+ *ptr = value; -+} -+ -+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { -+ return *ptr; -+} -+ -+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { -+ Atomic64 value = *ptr; -+ MemoryBarrier(); -+ return value; -+} -+ -+inline Atomic64 Release_Load(volatile const Atomic64* ptr) { -+ MemoryBarrier(); -+ return *ptr; -+} -+ -+#endif -+ -+} } // namespace v8::internal -+ -+#endif // V8_ATOMICOPS_INTERNALS_PPC_GCC_H_ -diff -up 
v8-3.14.5.10/src/builtins.cc.ppc v8-3.14.5.10/src/builtins.cc ---- v8-3.14.5.10/src/builtins.cc.ppc 2012-10-10 13:07:22.000000000 -0400 -+++ v8-3.14.5.10/src/builtins.cc 2016-06-07 14:15:45.972393116 -0400 -@@ -1617,10 +1617,15 @@ void Builtins::SetUp(bool create_heap_ob - - const BuiltinDesc* functions = builtin_function_table.functions(); - -+#if V8_TARGET_ARCH_PPC64 -+ const int kBufferSize = 9 * KB; -+#else -+ const int kBufferSize = 8 * KB; -+#endif - // For now we generate builtin adaptor code into a stack-allocated - // buffer, before copying it into individual code objects. Be careful - // with alignment, some platforms don't like unaligned code. -- union { int force_alignment; byte buffer[8*KB]; } u; -+ union { int force_alignment; byte buffer[kBufferSize]; } u; - - // Traverse the list of builtins and generate an adaptor in a - // separate code object for each one. -diff -up v8-3.14.5.10/src/checks.h.ppc v8-3.14.5.10/src/checks.h ---- v8-3.14.5.10/src/checks.h.ppc 2016-06-07 14:15:45.928393378 -0400 -+++ v8-3.14.5.10/src/checks.h 2016-06-07 14:15:45.972393116 -0400 -@@ -299,4 +299,18 @@ extern bool FLAG_enable_slow_asserts; - #define EXTRA_CHECK(condition) ((void) 0) - #endif - -+// PENGUIN: Extra checks for PPC PORT -+// - PPCPORT_UNIMPLEMENTED: for unimplemented features -+// - PPCPORT_CHECK: for development phase -+// - PPCPORT_UNSAFE_IMPLEMENTATION: unsafe implementation -+#ifdef ENABLE_EXTRA_PPCCHECKS -+#define PPCPORT_CHECK(condition) CHECK(condition) -+#define PPCPORT_UNIMPLEMENTED() UNIMPLEMENTED() -+#define PPCPORT_UNSAFE_IMPLEMENTATION() ((void)0) -+#else -+#define PPCPORT_CHECK(condition) ((void) 0) -+#define PPCPORT_UNIMPLEMENTED() ((void) 0) -+#define PPCPORT_UNSAFE_IMPLEMENTATION() ((void)0) -+#endif -+ - #endif // V8_CHECKS_H_ -diff -up v8-3.14.5.10/src/codegen.cc.ppc v8-3.14.5.10/src/codegen.cc ---- v8-3.14.5.10/src/codegen.cc.ppc 2012-03-14 07:16:03.000000000 -0400 -+++ v8-3.14.5.10/src/codegen.cc 2016-06-07 14:15:45.972393116 -0400 -@@ -200,7 +200,7 @@ void ArgumentsAccessStub::Generate(Macro - int CEntryStub::MinorKey() { - int result = (save_doubles_ == kSaveFPRegs) ? 1 : 0; - ASSERT(result_size_ == 1 || result_size_ == 2); --#ifdef _WIN64 -+#if defined(_WIN64) || defined(V8_TARGET_ARCH_PPC64) - return result | ((result_size_ == 1) ? 0 : 2); - #else - return result; -diff -up v8-3.14.5.10/src/codegen.h.ppc v8-3.14.5.10/src/codegen.h ---- v8-3.14.5.10/src/codegen.h.ppc 2012-05-29 09:20:14.000000000 -0400 -+++ v8-3.14.5.10/src/codegen.h 2016-06-07 14:15:45.972393116 -0400 -@@ -75,6 +75,8 @@ enum TypeofState { INSIDE_TYPEOF, NOT_IN - #include "x64/codegen-x64.h" - #elif V8_TARGET_ARCH_ARM - #include "arm/codegen-arm.h" -+#elif V8_TARGET_ARCH_PPC -+#include "ppc/codegen-ppc.h" - #elif V8_TARGET_ARCH_MIPS - #include "mips/codegen-mips.h" - #else -diff -up v8-3.14.5.10/src/code-stubs.h.ppc v8-3.14.5.10/src/code-stubs.h ---- v8-3.14.5.10/src/code-stubs.h.ppc 2012-10-10 13:07:22.000000000 -0400 -+++ v8-3.14.5.10/src/code-stubs.h 2016-06-07 14:15:45.972393116 -0400 -@@ -88,6 +88,18 @@ namespace internal { - #define CODE_STUB_LIST_ARM(V) - #endif - -+// List of code stubs only used on PPC platforms. -+#ifdef V8_TARGET_ARCH_PPC -+#define CODE_STUB_LIST_PPC(V) \ -+ V(GetProperty) \ -+ V(SetProperty) \ -+ V(InvokeBuiltin) \ -+ V(RegExpCEntry) \ -+ V(DirectCEntry) -+#else -+#define CODE_STUB_LIST_PPC(V) -+#endif -+ - // List of code stubs only used on MIPS platforms. 
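A few hunks up, the builtins.cc change grows the scratch code buffer to 9 KB for PPC64 while keeping the union { int force_alignment; byte buffer[kBufferSize]; } idiom. The union is what gives the buffer its alignment: a bare byte array is only guaranteed byte alignment, and adding an int member raises the whole object's alignment to that of int, which code copied into the buffer relies on. A small illustration; the type name is invented, and it uses C++11 alignof for brevity, which this 2012-era tree itself would not:

    // buffer_alignment.cc -- hypothetical example, not part of the patch
    #include <cstdio>

    typedef unsigned char byte;

    union AlignedBuffer {
      int force_alignment;  // raises alignment from 1 to alignof(int)
      byte buffer[64];
    };

    int main() {
      std::printf("%zu vs %zu\n", alignof(AlignedBuffer), alignof(byte));  // e.g. 4 vs 1
      return 0;
    }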
- #ifdef V8_TARGET_ARCH_MIPS - #define CODE_STUB_LIST_MIPS(V) \ -@@ -101,6 +113,7 @@ namespace internal { - #define CODE_STUB_LIST(V) \ - CODE_STUB_LIST_ALL_PLATFORMS(V) \ - CODE_STUB_LIST_ARM(V) \ -+ CODE_STUB_LIST_PPC(V) \ - CODE_STUB_LIST_MIPS(V) - - // Mode to overwrite BinaryExpression values. -@@ -254,6 +267,8 @@ class RuntimeCallHelper { - #include "x64/code-stubs-x64.h" - #elif V8_TARGET_ARCH_ARM - #include "arm/code-stubs-arm.h" -+#elif V8_TARGET_ARCH_PPC -+#include "ppc/code-stubs-ppc.h" - #elif V8_TARGET_ARCH_MIPS - #include "mips/code-stubs-mips.h" - #else -diff -up v8-3.14.5.10/src/conversions-inl.h.ppc v8-3.14.5.10/src/conversions-inl.h ---- v8-3.14.5.10/src/conversions-inl.h.ppc 2016-06-07 14:15:45.972393116 -0400 -+++ v8-3.14.5.10/src/conversions-inl.h 2016-06-07 14:19:40.280994665 -0400 -@@ -78,7 +78,7 @@ inline unsigned int FastD2UI(double x) { - #ifndef BIG_ENDIAN_FLOATING_POINT - Address mantissa_ptr = reinterpret_cast
<Address>(&x);
- #else
-- Address mantissa_ptr = reinterpret_cast<Address>(&x) + 4;
-+ Address mantissa_ptr = reinterpret_cast<Address>
(&x) + kIntSize;
- #endif
- // Copy least significant 32 bits of mantissa.
- memcpy(&result, mantissa_ptr, sizeof(result));
- return negative ? ~result + 1 : result;
-diff -up v8-3.14.5.10/src/d8.gyp.ppc v8-3.14.5.10/src/d8.gyp
---- v8-3.14.5.10/src/d8.gyp.ppc 2012-03-09 05:52:05.000000000 -0500
-+++ v8-3.14.5.10/src/d8.gyp 2016-06-07 14:15:45.972393116 -0400
-@@ -62,7 +62,8 @@
- 'sources': [ 'd8-readline.cc' ],
- }],
- ['(OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="netbsd" \
-- or OS=="openbsd" or OS=="solaris" or OS=="android")', {
-+ or OS=="openbsd" or OS=="solaris" or OS=="android" \
-+ or OS=="aix")', {
- 'sources': [ 'd8-posix.cc', ]
- }],
- [ 'OS=="win"', {
-diff -up v8-3.14.5.10/src/deoptimizer.h.ppc v8-3.14.5.10/src/deoptimizer.h
---- v8-3.14.5.10/src/deoptimizer.h.ppc 2012-10-22 09:09:53.000000000 -0400
-+++ v8-3.14.5.10/src/deoptimizer.h 2016-06-07 14:15:45.972393116 -0400
-@@ -697,7 +701,11 @@ class SlotRef BASE_EMBEDDED {
- return Handle<Object>(Memory::Object_at(addr_));
-
- case INT32: {
-+#if defined(V8_TARGET_ARCH_PPC64) && __BYTE_ORDER == __BIG_ENDIAN
-+ int value = Memory::int32_at(addr_ + kIntSize);
-+#else
- int value = Memory::int32_at(addr_);
-+#endif
- if (Smi::IsValid(value)) {
- return Handle<Object>(Smi::FromInt(value));
- } else {
-@@ -706,7 +710,11 @@
- }
-
- case UINT32: {
-+#if defined(V8_TARGET_ARCH_PPC64) && __BYTE_ORDER == __BIG_ENDIAN
-+ uint32_t value = Memory::uint32_at(addr_ + kIntSize);
-+#else
- uint32_t value = Memory::uint32_at(addr_);
-+#endif
- if (value <= static_cast<uint32_t>(Smi::kMaxValue)) {
- return Handle<Object>(Smi::FromInt(static_cast<int>(value)));
- } else {
-diff -up v8-3.14.5.10/src/disassembler.cc.ppc v8-3.14.5.10/src/disassembler.cc
---- v8-3.14.5.10/src/disassembler.cc.ppc 2012-06-27 07:12:38.000000000 -0400
-+++ v8-3.14.5.10/src/disassembler.cc 2016-06-07 14:15:45.972393116 -0400
-@@ -158,7 +158,11 @@ static int DecodeIt(FILE* f,
- "%08" V8PRIxPTR " jump table entry %4" V8PRIdPTR,
- ptr,
- ptr - begin);
-+#if V8_TARGET_ARCH_PPC64
-+ pc += 8;
-+#else
- pc += 4;
-+#endif
- } else {
- decode_buffer[0] = '\0';
- pc += d.InstructionDecode(decode_buffer, pc);
-diff -up v8-3.14.5.10/src/execution.h.ppc v8-3.14.5.10/src/execution.h
---- v8-3.14.5.10/src/execution.h.ppc 2012-07-24 03:59:48.000000000 -0400
-+++ v8-3.14.5.10/src/execution.h 2016-06-07 14:15:45.972393116 -0400
-@@ -258,7 +258,7 @@ class StackGuard {
- void EnableInterrupts();
- void DisableInterrupts();
-
--#ifdef V8_TARGET_ARCH_X64
-+#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_PPC64)
- static const uintptr_t kInterruptLimit = V8_UINT64_C(0xfffffffffffffffe);
- static const uintptr_t kIllegalLimit = V8_UINT64_C(0xfffffffffffffff8);
- #else
-diff -up v8-3.14.5.10/src/flag-definitions.h.ppc v8-3.14.5.10/src/flag-definitions.h
---- v8-3.14.5.10/src/flag-definitions.h.ppc 2013-05-23 06:49:13.000000000 -0400
-+++ v8-3.14.5.10/src/flag-definitions.h 2016-06-07 14:15:45.973393110 -0400
-@@ -442,6 +442,7 @@ DEFINE_bool(trace_parse, false, "trace p
-
- // simulator-arm.cc and simulator-mips.cc
- DEFINE_bool(trace_sim, false, "Trace simulator execution")
-+DEFINE_bool(trace_sim_stubs, false, "Trace simulator execution w/ stub markers")
- DEFINE_bool(check_icache, false,
- "Check icache flushes in ARM and MIPS simulator")
- DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions")
-diff -up v8-3.14.5.10/src/frames-inl.h.ppc v8-3.14.5.10/src/frames-inl.h
---- v8-3.14.5.10/src/frames-inl.h.ppc 2012-02-28 04:49:15.000000000 -0500
-+++ v8-3.14.5.10/src/frames-inl.h 2016-06-07 14:15:45.973393110
-0400 -@@ -38,6 +38,8 @@ - #include "x64/frames-x64.h" - #elif V8_TARGET_ARCH_ARM - #include "arm/frames-arm.h" -+#elif V8_TARGET_ARCH_PPC -+#include "ppc/frames-ppc.h" - #elif V8_TARGET_ARCH_MIPS - #include "mips/frames-mips.h" - #else -diff -up v8-3.14.5.10/src/full-codegen.h.ppc v8-3.14.5.10/src/full-codegen.h ---- v8-3.14.5.10/src/full-codegen.h.ppc 2012-08-10 10:46:33.000000000 -0400 -+++ v8-3.14.5.10/src/full-codegen.h 2016-06-07 14:15:45.973393110 -0400 -@@ -125,6 +125,8 @@ class FullCodeGenerator: public AstVisit - static const int kBackEdgeDistanceUnit = 162; - #elif V8_TARGET_ARCH_ARM - static const int kBackEdgeDistanceUnit = 142; -+#elif V8_TARGET_ARCH_PPC -+ static const int kBackEdgeDistanceUnit = 142; - #elif V8_TARGET_ARCH_MIPS - static const int kBackEdgeDistanceUnit = 142; - #else -@@ -333,12 +335,18 @@ class FullCodeGenerator: public AstVisit - Label* if_true, - Label* if_false, - Label* fall_through); --#else // All non-mips arch. -+#elif defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64) -+ void Split(Condition cc, -+ Label* if_true, -+ Label* if_false, -+ Label* fall_through, -+ CRegister cr = cr7); -+#else // All other arch. - void Split(Condition cc, - Label* if_true, - Label* if_false, - Label* fall_through); --#endif // V8_TARGET_ARCH_MIPS -+#endif - - // Load the value of a known (PARAMETER, LOCAL, or CONTEXT) variable into - // a register. Emits a context chain walk if if necessary (so does -diff -up v8-3.14.5.10/src/globals.h.ppc v8-3.14.5.10/src/globals.h ---- v8-3.14.5.10/src/globals.h.ppc 2016-06-07 14:15:45.958393199 -0400 -+++ v8-3.14.5.10/src/globals.h 2016-06-07 14:24:39.511206075 -0400 -@@ -56,6 +56,11 @@ - #define V8_INFINITY HUGE_VAL - #endif - -+#ifdef _AIX -+#undef V8_INFINITY -+#define V8_INFINITY (__builtin_inff()) -+#endif -+ - - #include "../include/v8stdint.h" - -@@ -86,6 +91,13 @@ namespace internal { - #elif defined(__MIPSEL__) || defined(__MIPSEB__) - #define V8_HOST_ARCH_MIPS 1 - #define V8_HOST_ARCH_32_BIT 1 -+#elif defined(__PPC__) || defined(_ARCH_PPC) -+#define V8_HOST_ARCH_PPC 1 -+#if defined(__PPC64__) || defined(_ARCH_PPC64) -+#define V8_HOST_ARCH_64_BIT 1 -+#else -+#define V8_HOST_ARCH_32_BIT 1 -+#endif - #else - #error Host architecture was not detected as supported by v8 - #endif -@@ -94,7 +106,8 @@ namespace internal { - // in the same way as the host architecture, that is, target the native - // environment as presented by the compiler. - #if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_IA32) && \ -- !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_MIPS) -+ !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_MIPS) && \ -+ !defined(V8_TARGET_ARCH_PPC) - #if defined(_M_X64) || defined(__x86_64__) - #define V8_TARGET_ARCH_X64 1 - #elif defined(_M_IX86) || defined(__i386__) -@@ -112,6 +125,10 @@ namespace internal { - #define BIG_ENDIAN_FLOATING_POINT 1 - #endif - -+#if __BYTE_ORDER == __BIG_ENDIAN -+#define BIG_ENDIAN_FLOATING_POINT 1 -+#endif -+ - // Check for supported combinations of host and target architectures. 
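The BIG_ENDIAN_FLOATING_POINT define added just above is what drives the mantissa-offset changes elsewhere in this patch (the "+ kIntSize" in FastD2UI, the swapped word loads in ldc1/sdc1): on a big-endian target the least significant 32 bits of a double live at byte offset 4, not 0. A standalone sketch, independent of V8, that makes the layout visible:

  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  int main() {
    double d = 1.0;  // IEEE-754 bit pattern: 0x3FF0000000000000
    uint32_t w[2];
    std::memcpy(w, &d, sizeof(d));
    // Little-endian: w[0] == 0x00000000 (mantissa low half), w[1] == 0x3ff00000.
    // Big-endian:    w[0] == 0x3ff00000, w[1] == 0x00000000, hence the extra
    // kIntSize offset when reading the low mantissa word on PPC.
    std::printf("%08x %08x\n", w[0], w[1]);
    return 0;
  }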
- #if defined(V8_TARGET_ARCH_IA32) && !defined(V8_HOST_ARCH_IA32) - #error Target architecture ia32 is only supported on ia32 host -@@ -120,8 +137,9 @@ namespace internal { - #error Target architecture x64 is only supported on x64 host - #endif - #if (defined(V8_TARGET_ARCH_ARM) && \ -- !(defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_ARM))) --#error Target architecture arm is only supported on arm and ia32 host -+ !(defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_ARM) || \ -+ defined(V8_HOST_ARCH_PPC))) -+#error Target architecture arm is only supported on arm, ppc and ia32 host - #endif - #if (defined(V8_TARGET_ARCH_MIPS) && \ - !(defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_MIPS))) -@@ -135,6 +153,9 @@ namespace internal { - #if (defined(V8_TARGET_ARCH_ARM) && !defined(V8_HOST_ARCH_ARM)) - #define USE_SIMULATOR 1 - #endif -+#if (defined(V8_TARGET_ARCH_PPC) && !defined(V8_HOST_ARCH_PPC)) -+#define USE_SIMULATOR 1 -+#endif - #if (defined(V8_TARGET_ARCH_MIPS) && !defined(V8_HOST_ARCH_MIPS)) - #define USE_SIMULATOR 1 - #endif -@@ -194,6 +215,16 @@ typedef byte* Address; - #define V8PRIdPTR V8_PTR_PREFIX "d" - #define V8PRIuPTR V8_PTR_PREFIX "u" - -+// Fix for AIX define intptr_t as "long int": -+#ifdef _AIX -+#undef V8_PTR_PREFIX -+#define V8_PTR_PREFIX "l" -+#undef V8PRIdPTR -+#define V8PRIdPTR "ld" -+#undef V8PRIxPTR -+#define V8PRIxPTR "lx" -+#endif -+ - // Fix for Mac OS X defining uintptr_t as "unsigned long": - #if defined(__APPLE__) && defined(__MACH__) - #undef V8PRIxPTR -diff -up v8-3.14.5.10/src/heap.cc.ppc v8-3.14.5.10/src/heap.cc ---- v8-3.14.5.10/src/heap.cc.ppc 2012-10-22 09:09:53.000000000 -0400 -+++ v8-3.14.5.10/src/heap.cc 2016-06-07 14:15:45.974393104 -0400 -@@ -50,6 +50,10 @@ - #include "v8threads.h" - #include "v8utils.h" - #include "vm-state-inl.h" -+#if V8_TARGET_ARCH_PPC && !V8_INTERPRETED_REGEXP -+#include "regexp-macro-assembler.h" -+#include "ppc/regexp-macro-assembler-ppc.h" -+#endif - #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP - #include "regexp-macro-assembler.h" - #include "arm/regexp-macro-assembler-arm.h" -diff -up v8-3.14.5.10/src/hydrogen.cc.ppc v8-3.14.5.10/src/hydrogen.cc ---- v8-3.14.5.10/src/hydrogen.cc.ppc 2016-06-07 14:15:45.934393343 -0400 -+++ v8-3.14.5.10/src/hydrogen.cc 2016-06-07 14:15:45.975393098 -0400 -@@ -43,6 +43,8 @@ - #include "x64/lithium-codegen-x64.h" - #elif V8_TARGET_ARCH_ARM - #include "arm/lithium-codegen-arm.h" -+#elif V8_TARGET_ARCH_PPC -+#include "ppc/lithium-codegen-ppc.h" - #elif V8_TARGET_ARCH_MIPS - #include "mips/lithium-codegen-mips.h" - #else -diff -up v8-3.14.5.10/src/hydrogen-instructions.cc.ppc v8-3.14.5.10/src/hydrogen-instructions.cc ---- v8-3.14.5.10/src/hydrogen-instructions.cc.ppc 2013-01-18 05:40:43.000000000 -0500 -+++ v8-3.14.5.10/src/hydrogen-instructions.cc 2016-06-07 14:15:45.974393104 -0400 -@@ -36,6 +36,8 @@ - #include "x64/lithium-x64.h" - #elif V8_TARGET_ARCH_ARM - #include "arm/lithium-arm.h" -+#elif V8_TARGET_ARCH_PPC -+#include "ppc/lithium-ppc.h" - #elif V8_TARGET_ARCH_MIPS - #include "mips/lithium-mips.h" - #else -@@ -966,8 +968,8 @@ HValue* HUnaryMathOperation::Canonicaliz - // introduced. 
- if (value()->representation().IsInteger32()) return value(); - --#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_IA32) || \ -- defined(V8_TARGET_ARCH_X64) -+#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_IA32) || \ -+ defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_PPC) - if (value()->IsDiv() && (value()->UseCount() == 1)) { - // TODO(2038): Implement this optimization for non ARM architectures. - HDiv* hdiv = HDiv::cast(value()); -diff -up v8-3.14.5.10/src/isolate.cc.ppc v8-3.14.5.10/src/isolate.cc ---- v8-3.14.5.10/src/isolate.cc.ppc 2016-06-07 14:15:45.948393259 -0400 -+++ v8-3.14.5.10/src/isolate.cc 2016-06-07 14:15:45.976393092 -0400 -@@ -116,6 +116,8 @@ void ThreadLocalTop::Initialize() { - #ifdef USE_SIMULATOR - #ifdef V8_TARGET_ARCH_ARM - simulator_ = Simulator::current(isolate_); -+#elif V8_TARGET_ARCH_PPC -+ simulator_ = Simulator::current(isolate_); - #elif V8_TARGET_ARCH_MIPS - simulator_ = Simulator::current(isolate_); - #endif -@@ -1425,6 +1427,8 @@ char* Isolate::RestoreThread(char* from) - #ifdef USE_SIMULATOR - #ifdef V8_TARGET_ARCH_ARM - thread_local_top()->simulator_ = Simulator::current(this); -+#elif V8_TARGET_ARCH_PPC -+ thread_local_top()->simulator_ = Simulator::current(this); - #elif V8_TARGET_ARCH_MIPS - thread_local_top()->simulator_ = Simulator::current(this); - #endif -@@ -1562,6 +1566,7 @@ Isolate::Isolate() - thread_manager_->isolate_ = this; - - #if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \ -+ defined(V8_TARGET_ARCH_PPC) && !defined(__PPC__) || \ - defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__) - simulator_initialized_ = false; - simulator_i_cache_ = NULL; -@@ -1879,7 +1884,8 @@ bool Isolate::Init(Deserializer* des) { - - // Initialize other runtime facilities - #if defined(USE_SIMULATOR) --#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) -+#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) \ -+ || defined(V8_TARGET_ARCH_PPC) - Simulator::Initialize(this); - #endif - #endif -diff -up v8-3.14.5.10/src/isolate.h.ppc v8-3.14.5.10/src/isolate.h ---- v8-3.14.5.10/src/isolate.h.ppc 2016-06-07 14:15:45.948393259 -0400 -+++ v8-3.14.5.10/src/isolate.h 2016-06-07 14:15:45.976393092 -0400 -@@ -94,7 +94,9 @@ class Debugger; - class DebuggerAgent; - #endif - --#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \ -+#if defined(NATIVE_SIMULATION) || \ -+ !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \ -+ !defined(__PPC__) && defined(V8_TARGET_ARCH_PPC) || \ - !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS) - class Redirection; - class Simulator; -@@ -256,7 +258,8 @@ class ThreadLocalTop BASE_EMBEDDED { - Address handler_; // try-blocks are chained through the stack - - #ifdef USE_SIMULATOR --#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) -+#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_PPC) || \ -+ defined(V8_TARGET_ARCH_MIPS) - Simulator* simulator_; - #endif - #endif // USE_SIMULATOR -@@ -374,7 +377,9 @@ class Isolate { - thread_id_(thread_id), - stack_limit_(0), - thread_state_(NULL), --#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \ -+#if defined(NATIVE_SIMULATION) || \ -+ !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \ -+ !defined(__PPC__) && defined(V8_TARGET_ARCH_PPC) || \ - !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS) - simulator_(NULL), - #endif -@@ -387,7 +392,9 @@ class Isolate { - ThreadState* thread_state() const { return thread_state_; } - void set_thread_state(ThreadState* value) { thread_state_ = value; } - --#if 
!defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \ -+#if defined(NATIVE_SIMULATION) || \ -+ !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \ -+ !defined(__PPC__) && defined(V8_TARGET_ARCH_PPC) || \ - !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS) - Simulator* simulator() const { return simulator_; } - void set_simulator(Simulator* simulator) { -@@ -405,7 +412,9 @@ class Isolate { - uintptr_t stack_limit_; - ThreadState* thread_state_; - --#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \ -+#if defined(NATIVE_SIMULATION) || \ -+ !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \ -+ !defined(__PPC__) && defined(V8_TARGET_ARCH_PPC) || \ - !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS) - Simulator* simulator_; - #endif -@@ -972,7 +981,9 @@ class Isolate { - int* code_kind_statistics() { return code_kind_statistics_; } - #endif - --#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \ -+#if defined(NATIVE_SIMULATION) || \ -+ defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \ -+ defined(V8_TARGET_ARCH_PPC) && !defined(__PPC__) || \ - defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__) - bool simulator_initialized() { return simulator_initialized_; } - void set_simulator_initialized(bool initialized) { -@@ -1252,7 +1263,9 @@ class Isolate { - // Time stamp at initialization. - double time_millis_at_init_; - --#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \ -+#if defined(NATIVE_SIMULATION) || \ -+ defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \ -+ defined(V8_TARGET_ARCH_PPC) && !defined(__PPC__) || \ - defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__) - bool simulator_initialized_; - HashMap* simulator_i_cache_; -diff -up v8-3.14.5.10/src/jsregexp.cc.ppc v8-3.14.5.10/src/jsregexp.cc ---- v8-3.14.5.10/src/jsregexp.cc.ppc 2012-09-14 09:28:26.000000000 -0400 -+++ v8-3.14.5.10/src/jsregexp.cc 2016-06-07 14:15:45.977393086 -0400 -@@ -50,6 +50,8 @@ - #include "x64/regexp-macro-assembler-x64.h" - #elif V8_TARGET_ARCH_ARM - #include "arm/regexp-macro-assembler-arm.h" -+#elif V8_TARGET_ARCH_PPC -+#include "ppc/regexp-macro-assembler-ppc.h" - #elif V8_TARGET_ARCH_MIPS - #include "mips/regexp-macro-assembler-mips.h" - #else -@@ -6131,6 +6133,9 @@ RegExpEngine::CompilationResult RegExpEn - #elif V8_TARGET_ARCH_ARM - RegExpMacroAssemblerARM macro_assembler(mode, (data->capture_count + 1) * 2, - zone); -+#elif V8_TARGET_ARCH_PPC -+ RegExpMacroAssemblerPPC macro_assembler(mode, (data->capture_count + 1) * 2, -+ zone); - #elif V8_TARGET_ARCH_MIPS - RegExpMacroAssemblerMIPS macro_assembler(mode, (data->capture_count + 1) * 2, - zone); -diff -up v8-3.14.5.10/src/jsregexp.h.ppc v8-3.14.5.10/src/jsregexp.h ---- v8-3.14.5.10/src/jsregexp.h.ppc 2012-08-29 11:32:24.000000000 -0400 -+++ v8-3.14.5.10/src/jsregexp.h 2016-06-07 14:15:45.977393086 -0400 -@@ -1352,6 +1352,13 @@ class BoyerMooreLookahead : public ZoneO - // to match foo is generated only once (the traces have a common prefix). The - // code to store the capture is deferred and generated (twice) after the places - // where baz has been matched. 
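The #ifdef _AIX block that follows undefines UNKNOWN, FALSE and TRUE because AIX system headers leak them as preprocessor macros, while the Trace class below declares enum members with exactly those names. A minimal reproduction of the collision and the same workaround; the #define here stands in for the system header:

  #include <cstdio>

  #define TRUE 1  // stand-in for the macro an AIX system header may define

  #undef TRUE     // the patch's workaround: drop the macro before the enum

  // With the macro still in place, this declaration would expand to
  // "... FALSE, 1 };" and fail to compile.
  enum TriBool { UNKNOWN, FALSE, TRUE };

  int main() {
    std::printf("TRUE as an enum value: %d\n", static_cast<int>(TRUE));  // 2
    return 0;
  }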
-+ -+#ifdef _AIX -+#undef UNKNOWN -+#undef FALSE -+#undef TRUE -+#endif -+ - class Trace { - public: - // A value for a property that is either known to be true, know to be false, -diff -up v8-3.14.5.10/src/lithium-allocator.cc.ppc v8-3.14.5.10/src/lithium-allocator.cc ---- v8-3.14.5.10/src/lithium-allocator.cc.ppc 2012-07-16 07:31:55.000000000 -0400 -+++ v8-3.14.5.10/src/lithium-allocator.cc 2016-06-07 14:15:45.978393080 -0400 -@@ -37,6 +37,8 @@ - #include "x64/lithium-x64.h" - #elif V8_TARGET_ARCH_ARM - #include "arm/lithium-arm.h" -+#elif V8_TARGET_ARCH_PPC -+#include "ppc/lithium-ppc.h" - #elif V8_TARGET_ARCH_MIPS - #include "mips/lithium-mips.h" - #else -diff -up v8-3.14.5.10/src/lithium-allocator-inl.h.ppc v8-3.14.5.10/src/lithium-allocator-inl.h ---- v8-3.14.5.10/src/lithium-allocator-inl.h.ppc 2011-06-08 06:05:15.000000000 -0400 -+++ v8-3.14.5.10/src/lithium-allocator-inl.h 2016-06-07 14:15:45.977393086 -0400 -@@ -36,6 +36,8 @@ - #include "x64/lithium-x64.h" - #elif V8_TARGET_ARCH_ARM - #include "arm/lithium-arm.h" -+#elif V8_TARGET_ARCH_PPC -+#include "ppc/lithium-ppc.h" - #elif V8_TARGET_ARCH_MIPS - #include "mips/lithium-mips.h" - #else -diff -up v8-3.14.5.10/src/lithium.cc.ppc v8-3.14.5.10/src/lithium.cc ---- v8-3.14.5.10/src/lithium.cc.ppc 2016-06-07 14:15:45.911393480 -0400 -+++ v8-3.14.5.10/src/lithium.cc 2016-06-07 14:15:45.978393080 -0400 -@@ -38,6 +38,9 @@ - #elif V8_TARGET_ARCH_ARM - #include "arm/lithium-arm.h" - #include "arm/lithium-codegen-arm.h" -+#elif V8_TARGET_ARCH_PPC -+#include "ppc/lithium-ppc.h" -+#include "ppc/lithium-codegen-ppc.h" - #elif V8_TARGET_ARCH_MIPS - #include "mips/lithium-mips.h" - #include "mips/lithium-codegen-mips.h" -diff -up v8-3.14.5.10/src/log.cc.ppc v8-3.14.5.10/src/log.cc ---- v8-3.14.5.10/src/log.cc.ppc 2012-08-29 11:32:24.000000000 -0400 -+++ v8-3.14.5.10/src/log.cc 2016-06-07 14:15:45.978393080 -0400 -@@ -1526,6 +1526,8 @@ void Logger::LogCodeInfo() { - const char arch[] = "x64"; - #elif V8_TARGET_ARCH_ARM - const char arch[] = "arm"; -+#elif V8_TARGET_ARCH_PPC -+ const char arch[] = "ppc"; - #elif V8_TARGET_ARCH_MIPS - const char arch[] = "mips"; - #else -diff -up v8-3.14.5.10/src/macro-assembler.h.ppc v8-3.14.5.10/src/macro-assembler.h ---- v8-3.14.5.10/src/macro-assembler.h.ppc 2012-02-14 06:46:07.000000000 -0500 -+++ v8-3.14.5.10/src/macro-assembler.h 2016-06-07 14:15:45.978393080 -0400 -@@ -58,6 +58,13 @@ const int kInvalidProtoDepth = -1; - #include "arm/assembler-arm-inl.h" - #include "code.h" // must be after assembler_*.h - #include "arm/macro-assembler-arm.h" -+#elif V8_TARGET_ARCH_PPC -+#include "ppc/constants-ppc.h" -+#include "assembler.h" -+#include "ppc/assembler-ppc.h" -+#include "ppc/assembler-ppc-inl.h" -+#include "code.h" // must be after assembler_*.h -+#include "ppc/macro-assembler-ppc.h" - #elif V8_TARGET_ARCH_MIPS - #include "mips/constants-mips.h" - #include "assembler.h" -diff -up v8-3.14.5.10/src/objects.cc.ppc v8-3.14.5.10/src/objects.cc ---- v8-3.14.5.10/src/objects.cc.ppc 2016-06-07 14:15:45.916393450 -0400 -+++ v8-3.14.5.10/src/objects.cc 2016-06-07 14:15:45.981393062 -0400 -@@ -7009,8 +7009,8 @@ static inline bool CompareRawStringConte - // then we have to check that the strings are aligned before - // comparing them blockwise. 
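The first change in the hunk below swaps uint32_t for uintptr_t in the pointer casts: on a 64-bit target (PPC64 included) a pointer does not fit in uint32_t, so the old cast would drop the upper address bits, and most 64-bit compilers reject it outright. The corrected alignment test, sketched in isolation:

  #include <cstdint>

  // Both pointers must be word-aligned before the blockwise compare;
  // uintptr_t preserves every address bit, uint32_t would not on 64-bit.
  static bool BothWordAligned(const void* pa, const void* pb) {
    const uintptr_t kAlignmentMask = sizeof(uint32_t) - 1;
    uintptr_t pa_addr = reinterpret_cast<uintptr_t>(pa);
    uintptr_t pb_addr = reinterpret_cast<uintptr_t>(pb);
    return ((pa_addr & kAlignmentMask) | (pb_addr & kAlignmentMask)) == 0;
  }

  int main() {
    static const uint32_t buf[2] = { 1, 2 };
    return BothWordAligned(&buf[0], &buf[1]) ? 0 : 1;  // aligned: exits 0
  }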
- const int kAlignmentMask = sizeof(uint32_t) - 1; // NOLINT -- uint32_t pa_addr = reinterpret_cast(pa); -- uint32_t pb_addr = reinterpret_cast(pb); -+ uintptr_t pa_addr = reinterpret_cast(pa); -+ uintptr_t pb_addr = reinterpret_cast(pb); - if (((pa_addr & kAlignmentMask) | (pb_addr & kAlignmentMask)) == 0) { - #endif - const int kStepSize = sizeof(int) / sizeof(Char); // NOLINT -diff -up v8-3.14.5.10/src/objects.h.ppc v8-3.14.5.10/src/objects.h ---- v8-3.14.5.10/src/objects.h.ppc 2016-06-07 14:15:45.963393170 -0400 -+++ v8-3.14.5.10/src/objects.h 2016-06-07 14:15:45.982393056 -0400 -@@ -37,6 +37,8 @@ - #include "unicode-inl.h" - #if V8_TARGET_ARCH_ARM - #include "arm/constants-arm.h" -+#elif V8_TARGET_ARCH_PPC -+#include "ppc/constants-ppc.h" - #elif V8_TARGET_ARCH_MIPS - #include "mips/constants-mips.h" - #endif -@@ -5852,10 +5854,11 @@ class SharedFunctionInfo: public HeapObj - // garbage collections. - // To avoid wasting space on 64-bit architectures we use - // the following trick: we group integer fields into pairs -- // First integer in each pair is shifted left by 1. -+ // The least significant integer in each pair is shifted left by 1. - // By doing this we guarantee that LSB of each kPointerSize aligned - // word is not set and thus this word cannot be treated as pointer - // to HeapObject during old space traversal. -+#if __BYTE_ORDER == __LITTLE_ENDIAN - static const int kLengthOffset = - kAstNodeCountOffset + kPointerSize; - static const int kFormalParameterCountOffset = -@@ -5883,6 +5886,38 @@ class SharedFunctionInfo: public HeapObj - - static const int kCountersOffset = kOptCountOffset + kIntSize; - static const int kStressDeoptCounterOffset = kCountersOffset + kIntSize; -+#elif __BYTE_ORDER == __BIG_ENDIAN -+ static const int kFormalParameterCountOffset = -+ kAstNodeCountOffset + kPointerSize; -+ static const int kLengthOffset = -+ kFormalParameterCountOffset + kIntSize; -+ -+ static const int kNumLiteralsOffset = -+ kLengthOffset + kIntSize; -+ static const int kExpectedNofPropertiesOffset = -+ kNumLiteralsOffset + kIntSize; -+ -+ static const int kStartPositionAndTypeOffset = -+ kExpectedNofPropertiesOffset + kIntSize; -+ static const int kEndPositionOffset = -+ kStartPositionAndTypeOffset + kIntSize; -+ -+ static const int kCompilerHintsOffset = -+ kEndPositionOffset + kIntSize; -+ static const int kFunctionTokenPositionOffset = -+ kCompilerHintsOffset + kIntSize; -+ -+ static const int kOptCountOffset = -+ kFunctionTokenPositionOffset + kIntSize; -+ static const int kThisPropertyAssignmentsCountOffset = -+ kOptCountOffset + kIntSize; -+ -+ static const int kStressDeoptCounterOffset = -+ kThisPropertyAssignmentsCountOffset + kIntSize; -+ static const int kCountersOffset = kStressDeoptCounterOffset + kIntSize; -+#else -+#error Unknown byte ordering -+#endif - - // Total size. - static const int kSize = kStressDeoptCounterOffset + kIntSize; -@@ -7322,8 +7357,13 @@ class String: public HeapObject { - - // Layout description. 
- static const int kLengthOffset = HeapObject::kHeaderSize; -- static const int kHashFieldOffset = kLengthOffset + kPointerSize; -- static const int kSize = kHashFieldOffset + kPointerSize; -+ static const int kHashFieldSlot = kLengthOffset + kPointerSize; -+#if __BYTE_ORDER == __LITTLE_ENDIAN || !V8_HOST_ARCH_64_BIT -+ static const int kHashFieldOffset = kHashFieldSlot; -+#else -+ static const int kHashFieldOffset = kHashFieldSlot + kIntSize; -+#endif -+ static const int kSize = kHashFieldSlot + kPointerSize; - - // Maximum number of characters to consider when trying to convert a string - // value into an array index. -diff -up v8-3.14.5.10/src/objects-inl.h.ppc v8-3.14.5.10/src/objects-inl.h ---- v8-3.14.5.10/src/objects-inl.h.ppc 2016-06-07 14:15:45.901393539 -0400 -+++ v8-3.14.5.10/src/objects-inl.h 2016-06-07 14:15:45.979393074 -0400 -@@ -1061,7 +1061,7 @@ bool Smi::IsValid(intptr_t value) { - bool in_range = (value >= kMinValue) && (value <= kMaxValue); - #endif - --#ifdef V8_TARGET_ARCH_X64 -+#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_PPC64) - // To be representable as a long smi, the value must be a 32-bit integer. - bool result = (value == static_cast(value)); - #else -@@ -2409,10 +2409,7 @@ uint32_t String::hash_field() { - - - void String::set_hash_field(uint32_t value) { -- WRITE_UINT32_FIELD(this, kHashFieldOffset, value); --#if V8_HOST_ARCH_64_BIT -- WRITE_UINT32_FIELD(this, kHashFieldOffset + kIntSize, 0); --#endif -+ WRITE_INTPTR_FIELD(this, kHashFieldSlot, value); - } - - -@@ -3977,25 +3974,33 @@ SMI_ACCESSORS(SharedFunctionInfo, - kStressDeoptCounterOffset) - #else - --#define PSEUDO_SMI_ACCESSORS_LO(holder, name, offset) \ -- STATIC_ASSERT(holder::offset % kPointerSize == 0); \ -- int holder::name() { \ -- int value = READ_INT_FIELD(this, offset); \ -- ASSERT(kHeapObjectTag == 1); \ -- ASSERT((value & kHeapObjectTag) == 0); \ -- return value >> 1; \ -- } \ -- void holder::set_##name(int value) { \ -- ASSERT(kHeapObjectTag == 1); \ -- ASSERT((value & 0xC0000000) == 0xC0000000 || \ -- (value & 0xC0000000) == 0x000000000); \ -- WRITE_INT_FIELD(this, \ -- offset, \ -- (value << 1) & ~kHeapObjectTag); \ -+#if __BYTE_ORDER == __LITTLE_ENDIAN -+#define PSEUDO_SMI_LO_ALIGN 0 -+#define PSEUDO_SMI_HI_ALIGN kIntSize -+#else -+#define PSEUDO_SMI_LO_ALIGN kIntSize -+#define PSEUDO_SMI_HI_ALIGN 0 -+#endif -+ -+#define PSEUDO_SMI_ACCESSORS_LO(holder, name, offset) \ -+ STATIC_ASSERT(holder::offset % kPointerSize == PSEUDO_SMI_LO_ALIGN); \ -+ int holder::name() { \ -+ int value = READ_INT_FIELD(this, offset); \ -+ ASSERT(kHeapObjectTag == 1); \ -+ ASSERT((value & kHeapObjectTag) == 0); \ -+ return value >> 1; \ -+ } \ -+ void holder::set_##name(int value) { \ -+ ASSERT(kHeapObjectTag == 1); \ -+ ASSERT((value & 0xC0000000) == 0xC0000000 || \ -+ (value & 0xC0000000) == 0x000000000); \ -+ WRITE_INT_FIELD(this, \ -+ offset, \ -+ (value << 1) & ~kHeapObjectTag); \ - } - --#define PSEUDO_SMI_ACCESSORS_HI(holder, name, offset) \ -- STATIC_ASSERT(holder::offset % kPointerSize == kIntSize); \ -+#define PSEUDO_SMI_ACCESSORS_HI(holder, name, offset) \ -+ STATIC_ASSERT(holder::offset % kPointerSize == PSEUDO_SMI_HI_ALIGN); \ - INT_ACCESSORS(holder, name, offset) - - -diff -up v8-3.14.5.10/src/platform-aix.cc.ppc v8-3.14.5.10/src/platform-aix.cc ---- v8-3.14.5.10/src/platform-aix.cc.ppc 2016-06-07 14:15:45.983393050 -0400 -+++ v8-3.14.5.10/src/platform-aix.cc 2016-06-07 14:15:45.983393050 -0400 -@@ -0,0 +1,894 @@ -+// Copyright 2013 the V8 project authors. All rights reserved. 
-+// Redistribution and use in source and binary forms, with or without -+// modification, are permitted provided that the following conditions are -+// met: -+// -+// * Redistributions of source code must retain the above copyright -+// notice, this list of conditions and the following disclaimer. -+// * Redistributions in binary form must reproduce the above -+// copyright notice, this list of conditions and the following -+// disclaimer in the documentation and/or other materials provided -+// with the distribution. -+// * Neither the name of Google Inc. nor the names of its -+// contributors may be used to endorse or promote products derived -+// from this software without specific prior written permission. -+// -+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ -+// Platform specific code for AIX goes here. For the POSIX comaptible parts -+// the implementation is in platform-posix.cc. -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include // mmap & munmap -+#include // mmap & munmap -+#include // open -+#include // open -+#include // getpagesize -+#include // index -+#include -+#include -+#include -+ -+#undef MAP_TYPE -+ -+#include "v8.h" -+#include "v8threads.h" -+ -+#include "platform-posix.h" -+#include "platform.h" -+#include "vm-state-inl.h" -+ -+ -+namespace v8 { -+namespace internal { -+ -+// 0 is never a valid thread id on AIX since tids and pids share a -+// name space and pid 0 is used to kill the group (see man 2 kill). -+static const pthread_t kNoThread = (pthread_t) 0; -+ -+ -+double ceiling(double x) { -+ // Correct as on OS X -+ if (-1.0 < x && x < 0.0) { -+ return -0.0; -+ } else { -+ return ceil(x); -+ } -+} -+ -+ -+static Mutex* limit_mutex = NULL; -+ -+ -+void OS::PostSetUp() { -+ POSIXPostSetUp(); -+} -+ -+ -+void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) { -+ __asm__ __volatile__("" : : : "memory"); -+ *ptr = value; -+} -+ -+ -+uint64_t OS::CpuFeaturesImpliedByPlatform() { -+ return 0; // AIX runs on anything. -+} -+ -+ -+int OS::ActivationFrameAlignment() { -+ // 8 byte alignment on AIX -+ return 8; -+} -+ -+ -+const char* OS::LocalTimezone(double time) { -+ if (isnan(time)) return ""; -+ time_t tv = static_cast(floor(time/msPerSecond)); -+ struct tm* t = localtime(&tv); -+ if (NULL == t) return ""; -+ return tzname[0]; // The location of the timezone string on AIX. -+} -+ -+ -+double OS::LocalTimeOffset() { -+ // On AIX, struct tm does not contain a tm_gmtoff field. 
-+ time_t utc = time(NULL); -+ ASSERT(utc != -1); -+ struct tm* loc = localtime(&utc); -+ ASSERT(loc != NULL); -+ return static_cast((mktime(loc) - utc) * msPerSecond); -+} -+ -+ -+// We keep the lowest and highest addresses mapped as a quick way of -+// determining that pointers are outside the heap (used mostly in assertions -+// and verification). The estimate is conservative, i.e., not all addresses in -+// 'allocated' space are actually allocated to our heap. The range is -+// [lowest, highest), inclusive on the low and and exclusive on the high end. -+static void* lowest_ever_allocated = reinterpret_cast(-1); -+static void* highest_ever_allocated = reinterpret_cast(0); -+ -+ -+static void UpdateAllocatedSpaceLimits(void* address, int size) { -+ ASSERT(limit_mutex != NULL); -+ ScopedLock lock(limit_mutex); -+ -+ lowest_ever_allocated = Min(lowest_ever_allocated, address); -+ highest_ever_allocated = -+ Max(highest_ever_allocated, -+ reinterpret_cast(reinterpret_cast(address) + size)); -+} -+ -+ -+bool OS::IsOutsideAllocatedSpace(void* address) { -+ return address < lowest_ever_allocated || address >= highest_ever_allocated; -+} -+ -+ -+size_t OS::AllocateAlignment() { -+ return getpagesize(); -+} -+ -+ -+void* OS::Allocate(const size_t requested, -+ size_t* allocated, -+ bool executable) { -+ const size_t msize = RoundUp(requested, getpagesize()); -+ int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0); -+ void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); -+ -+ if (mbase == MAP_FAILED) { -+ LOG(ISOLATE, StringEvent("OS::Allocate", "mmap failed")); -+ return NULL; -+ } -+ *allocated = msize; -+ UpdateAllocatedSpaceLimits(mbase, msize); -+ return mbase; -+} -+ -+ -+void OS::Free(void* buf, const size_t length) { -+ // TODO(1240712): munmap has a return value which is ignored here. -+ int result = munmap(buf, length); -+ USE(result); -+ ASSERT(result == 0); -+} -+ -+ -+void OS::Sleep(int milliseconds) { -+ unsigned int ms = static_cast(milliseconds); -+ usleep(1000 * ms); -+} -+ -+ -+void OS::Abort() { -+ // Redirect to std abort to signal abnormal program termination. 
-+ abort(); -+} -+ -+ -+void OS::DebugBreak() { -+#if (defined(__arm__) || defined(__thumb__)) -+# if defined(CAN_USE_ARMV5_INSTRUCTIONS) -+ asm("bkpt 0"); -+# endif -+#elif defined(_ARCH_PPC) -+ asm("trap"); -+#else -+ asm("int $3"); -+#endif -+} -+ -+ -+class PosixMemoryMappedFile : public OS::MemoryMappedFile { -+ public: -+ PosixMemoryMappedFile(FILE* file, void* memory, int size) -+ : file_(file), memory_(memory), size_(size) { } -+ virtual ~PosixMemoryMappedFile(); -+ virtual void* memory() { return memory_; } -+ virtual int size() { return size_; } -+ private: -+ FILE* file_; -+ void* memory_; -+ int size_; -+}; -+ -+ -+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { -+ FILE* file = fopen(name, "r+"); -+ if (file == NULL) return NULL; -+ -+ fseek(file, 0, SEEK_END); -+ int size = ftell(file); -+ -+ void* memory = -+ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); -+ return new PosixMemoryMappedFile(file, memory, size); -+} -+ -+ -+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size, -+ void* initial) { -+ FILE* file = fopen(name, "w+"); -+ if (file == NULL) return NULL; -+ int result = fwrite(initial, size, 1, file); -+ if (result < 1) { -+ fclose(file); -+ return NULL; -+ } -+ void* memory = -+ mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0); -+ return new PosixMemoryMappedFile(file, memory, size); -+} -+ -+ -+PosixMemoryMappedFile::~PosixMemoryMappedFile() { -+ if (memory_) munmap(memory_, size_); -+ fclose(file_); -+} -+ -+ -+static unsigned StringToLong(char* buffer) { -+ return static_cast(strtol(buffer, NULL, 16)); // NOLINT -+} -+ -+ -+void OS::LogSharedLibraryAddresses() { -+ static const int MAP_LENGTH = 1024; -+ int fd = open("/proc/self/maps", O_RDONLY); -+ if (fd < 0) return; -+ while (true) { -+ char addr_buffer[11]; -+ addr_buffer[0] = '0'; -+ addr_buffer[1] = 'x'; -+ addr_buffer[10] = 0; -+ int result = read(fd, addr_buffer + 2, 8); -+ if (result < 8) break; -+ unsigned start = StringToLong(addr_buffer); -+ result = read(fd, addr_buffer + 2, 1); -+ if (result < 1) break; -+ if (addr_buffer[2] != '-') break; -+ result = read(fd, addr_buffer + 2, 8); -+ if (result < 8) break; -+ unsigned end = StringToLong(addr_buffer); -+ char buffer[MAP_LENGTH]; -+ int bytes_read = -1; -+ do { -+ bytes_read++; -+ if (bytes_read >= MAP_LENGTH - 1) -+ break; -+ result = read(fd, buffer + bytes_read, 1); -+ if (result < 1) break; -+ } while (buffer[bytes_read] != '\n'); -+ buffer[bytes_read] = 0; -+ // Ignore mappings that are not executable. -+ if (buffer[3] != 'x') continue; -+ char* start_of_path = index(buffer, '/'); -+ // There may be no filename in this line. Skip to next. -+ if (start_of_path == NULL) continue; -+ buffer[bytes_read] = 0; -+ LOG(i::Isolate::Current(), SharedLibraryEvent(start_of_path, start, end)); -+ } -+ close(fd); -+} -+ -+ -+void OS::SignalCodeMovingGC() { -+} -+ -+ -+int OS::StackWalk(Vector frames) { -+ return 0; -+} -+ -+ -+// Constants used for mmap. 
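The VirtualMemory code that follows uses the classic POSIX reserve/commit split: mmap with PROT_NONE reserves a range of address space without committing pages, and mprotect later commits sub-ranges on demand. A self-contained sketch of the same pattern (POSIX-only; assumes MAP_ANONYMOUS is available):

  #include <sys/mman.h>
  #include <cassert>
  #include <cstddef>

  int main() {
    const size_t kReserved = 1 << 20;  // reserve 1 MB of address space
    void* base = mmap(NULL, kReserved, PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    assert(base != MAP_FAILED);

    // Commit the first 64 KB, as VirtualMemory::CommitRegion does below.
    const size_t kCommitted = 64 * 1024;
    int rc = mprotect(base, kCommitted, PROT_READ | PROT_WRITE);
    assert(rc == 0);
    static_cast<char*>(base)[0] = 1;  // committed memory is now usable

    // Uncommit and release the whole reservation.
    mprotect(base, kCommitted, PROT_NONE);
    munmap(base, kReserved);
    return 0;
  }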
-+static const int kMmapFd = -1;
-+static const int kMmapFdOffset = 0;
-+
-+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-+
-+VirtualMemory::VirtualMemory(size_t size) {
-+  address_ = ReserveRegion(size);
-+  size_ = size;
-+}
-+
-+
-+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
-+    : address_(NULL), size_(0) {
-+  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
-+  size_t request_size = RoundUp(size + alignment,
-+                                static_cast<intptr_t>(OS::AllocateAlignment()));
-+  void* reservation = mmap(OS::GetRandomMmapAddr(),
-+                           request_size,
-+                           PROT_NONE,
-+                           MAP_PRIVATE | MAP_ANONYMOUS,
-+                           kMmapFd,
-+                           kMmapFdOffset);
-+  if (reservation == MAP_FAILED) return;
-+
-+  Address base = static_cast<Address>(reservation);
-+  Address aligned_base = RoundUp(base, alignment);
-+  ASSERT_LE(base, aligned_base);
-+
-+  // Unmap extra memory reserved before and after the desired block.
-+  if (aligned_base != base) {
-+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
-+    OS::Free(base, prefix_size);
-+    request_size -= prefix_size;
-+  }
-+
-+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
-+  ASSERT_LE(aligned_size, request_size);
-+
-+  if (aligned_size != request_size) {
-+    size_t suffix_size = request_size - aligned_size;
-+    OS::Free(aligned_base + aligned_size, suffix_size);
-+    request_size -= suffix_size;
-+  }
-+
-+  ASSERT(aligned_size == request_size);
-+
-+  address_ = static_cast<void*>(aligned_base);
-+  size_ = aligned_size;
-+}
-+
-+
-+VirtualMemory::~VirtualMemory() {
-+  if (IsReserved()) {
-+    bool result = ReleaseRegion(address(), size());
-+    ASSERT(result);
-+    USE(result);
-+  }
-+}
-+
-+
-+bool VirtualMemory::IsReserved() {
-+  return address_ != NULL;
-+}
-+
-+
-+void VirtualMemory::Reset() {
-+  address_ = NULL;
-+  size_ = 0;
-+}
-+
-+
-+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-+  return CommitRegion(address, size, is_executable);
-+}
-+
-+
-+bool VirtualMemory::Uncommit(void* address, size_t size) {
-+  return UncommitRegion(address, size);
-+}
-+
-+
-+bool VirtualMemory::Guard(void* address) {
-+  OS::Guard(address, OS::CommitPageSize());
-+  return true;
-+}
-+
-+
-+void* VirtualMemory::ReserveRegion(size_t size) {
-+  void* result = mmap(OS::GetRandomMmapAddr(),
-+                      size,
-+                      PROT_NONE,
-+                      MAP_PRIVATE | MAP_ANONYMOUS,
-+                      kMmapFd,
-+                      kMmapFdOffset);
-+
-+  if (result == MAP_FAILED) return NULL;
-+
-+  return result;
-+}
-+
-+
-+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
-+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-+  if (mprotect(base, size, prot) == -1) return false;
-+
-+  UpdateAllocatedSpaceLimits(base, size);
-+  return true;
-+}
-+
-+
-+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
-+  return mprotect(base, size, PROT_NONE) != -1;
-+}
-+
-+
-+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
-+  return munmap(base, size) == 0;
-+}
-+
-+
-+class Thread::PlatformData : public Malloced {
-+ public:
-+  pthread_t thread_;  // Thread handle for pthread.
-+};
-+
-+
-+Thread::Thread(const Options& options)
-+    : data_(new PlatformData),
-+      stack_size_(options.stack_size()) {
-+  set_name(options.name());
-+}
-+
-+
-+Thread::~Thread() {
-+  delete data_;
-+}
-+
-+
-+static void* ThreadEntry(void* arg) {
-+  Thread* thread = reinterpret_cast<Thread*>(arg);
-+  // This is also initialized by the first argument to pthread_create() but we
-+  // don't know which thread will run first (the original thread or the new
-+  // one) so we initialize it here too.
-+  thread->data()->thread_ = pthread_self();
-+  ASSERT(thread->data()->thread_ != kNoThread);
-+  thread->Run();
-+  return NULL;
-+}
-+
-+
-+void Thread::set_name(const char* name) {
-+  strncpy(name_, name, sizeof(name_));
-+  name_[sizeof(name_) - 1] = '\0';
-+}
-+
-+
-+void Thread::Start() {
-+  pthread_attr_t attr;
-+  size_t stack_size = stack_size_;
-+
-+  if (stack_size == 0) {
-+    // Default is 96KB -- bump up to 2MB
-+    stack_size = 2 * MB;
-+  }
-+  pthread_attr_init(&attr);
-+  pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size));
-+  pthread_create(&data_->thread_, &attr, ThreadEntry, this);
-+  ASSERT(data_->thread_ != kNoThread);
-+}
-+
-+
-+void Thread::Join() {
-+  pthread_join(data_->thread_, NULL);
-+}
-+
-+
-+Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
-+  pthread_key_t key;
-+  int result = pthread_key_create(&key, NULL);
-+  USE(result);
-+  ASSERT(result == 0);
-+  return static_cast<LocalStorageKey>(key);
-+}
-+
-+
-+void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
-+  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
-+  int result = pthread_key_delete(pthread_key);
-+  USE(result);
-+  ASSERT(result == 0);
-+}
-+
-+
-+void* Thread::GetThreadLocal(LocalStorageKey key) {
-+  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
-+  return pthread_getspecific(pthread_key);
-+}
-+
-+
-+void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
-+  pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
-+  pthread_setspecific(pthread_key, value);
-+}
-+
-+
-+void Thread::YieldCPU() {
-+  sched_yield();
-+}
-+
-+
-+class AIXMutex : public Mutex {
-+ public:
-+  AIXMutex() {
-+    pthread_mutexattr_t attrs;
-+    int result = pthread_mutexattr_init(&attrs);
-+    ASSERT(result == 0);
-+    result = pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_RECURSIVE);
-+    ASSERT(result == 0);
-+    result = pthread_mutex_init(&mutex_, &attrs);
-+    ASSERT(result == 0);
-+    USE(result);
-+  }
-+
-+  virtual ~AIXMutex() { pthread_mutex_destroy(&mutex_); }
-+
-+  virtual int Lock() {
-+    int result = pthread_mutex_lock(&mutex_);
-+    return result;
-+  }
-+
-+  virtual int Unlock() {
-+    int result = pthread_mutex_unlock(&mutex_);
-+    return result;
-+  }
-+
-+  virtual bool TryLock() {
-+    int result = pthread_mutex_trylock(&mutex_);
-+    // Return false if the lock is busy and locking failed.
-+    if (result == EBUSY) {
-+      return false;
-+    }
-+    ASSERT(result == 0);  // Verify no other errors.
-+    return true;
-+  }
-+
-+ private:
-+  pthread_mutex_t mutex_;  // Pthread mutex for POSIX platforms.
-+};
-+
-+
-+Mutex* OS::CreateMutex() {
-+  return new AIXMutex();
-+}
-+
-+
-+class AIXSemaphore : public Semaphore {
-+ public:
-+  explicit AIXSemaphore(int count) { sem_init(&sem_, 0, count); }
-+  virtual ~AIXSemaphore() { sem_destroy(&sem_); }
-+
-+  virtual void Wait();
-+  virtual bool Wait(int timeout);
-+  virtual void Signal() { sem_post(&sem_); }
-+ private:
-+  sem_t sem_;
-+};
-+
-+
-+void AIXSemaphore::Wait() {
-+  while (true) {
-+    int result = sem_wait(&sem_);
-+    if (result == 0) return;  // Successfully got semaphore.
-+    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
-+  }
-+}
-+
-+
-+#ifndef TIMEVAL_TO_TIMESPEC
-+#define TIMEVAL_TO_TIMESPEC(tv, ts) do { \
-+    (ts)->tv_sec = (tv)->tv_sec; \
-+    (ts)->tv_nsec = (tv)->tv_usec * 1000; \
-+} while (false)
-+#endif
-+
-+
-+#ifndef timeradd
-+#define timeradd(a, b, result) \
-+  do { \
-+    (result)->tv_sec = (a)->tv_sec + (b)->tv_sec; \
-+    (result)->tv_usec = (a)->tv_usec + (b)->tv_usec; \
-+    if ((result)->tv_usec >= 1000000) { \
-+      ++(result)->tv_sec; \
-+      (result)->tv_usec -= 1000000; \
-+    } \
-+  } while (0)
-+#endif
-+
-+
-+bool AIXSemaphore::Wait(int timeout) {
-+  const long kOneSecondMicros = 1000000;  // NOLINT
-+
-+  // Split timeout into second and nanosecond parts.
-+  struct timeval delta;
-+  delta.tv_usec = timeout % kOneSecondMicros;
-+  delta.tv_sec = timeout / kOneSecondMicros;
-+
-+  struct timeval current_time;
-+  // Get the current time.
-+  if (gettimeofday(&current_time, NULL) == -1) {
-+    return false;
-+  }
-+
-+  // Calculate time for end of timeout.
-+  struct timeval end_time;
-+  timeradd(&current_time, &delta, &end_time);
-+
-+  struct timespec ts;
-+  TIMEVAL_TO_TIMESPEC(&end_time, &ts);
-+  while (true) {
-+    int result = sem_timedwait(&sem_, &ts);
-+    if (result == 0) return true;  // Successfully got semaphore.
-+    if (result == -1 && errno == ETIMEDOUT) return false;  // Timeout.
-+    CHECK(result == -1 && errno == EINTR);  // Signal caused spurious wakeup.
-+  }
-+}
-+
-+
-+Semaphore* OS::CreateSemaphore(int count) {
-+  return new AIXSemaphore(count);
-+}
-+
-+
-+static pthread_t GetThreadID() {
-+  pthread_t thread_id = pthread_self();
-+  return thread_id;
-+}
-+
-+
-+class Sampler::PlatformData : public Malloced {
-+ public:
-+  PlatformData() : vm_tid_(GetThreadID()) {}
-+
-+  pthread_t vm_tid() const { return vm_tid_; }
-+
-+ private:
-+  pthread_t vm_tid_;
-+};
-+
-+
-+static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
-+  USE(info);
-+  if (signal != SIGPROF) return;
-+  Isolate* isolate = Isolate::UncheckedCurrent();
-+  if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
-+    // We require a fully initialized and entered isolate.
-+    return;
-+  }
-+  if (v8::Locker::IsActive() &&
-+      !isolate->thread_manager()->IsLockedByCurrentThread()) {
-+    return;
-+  }
-+
-+  Sampler* sampler = isolate->logger()->sampler();
-+  if (sampler == NULL || !sampler->IsActive()) return;
-+
-+  TickSample sample_obj;
-+  TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
-+  if (sample == NULL) sample = &sample_obj;
-+
-+  // Extracting the sample from the context is extremely machine dependent.
-+  ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
-+  mcontext_t& mcontext = ucontext->uc_mcontext;
-+  sample->state = isolate->current_vm_state();
-+#if V8_HOST_ARCH_IA32
-+  sample->pc = reinterpret_cast<Address>(mcontext.mc_eip);
-+  sample->sp = reinterpret_cast<Address>(mcontext.mc_esp);
-+  sample->fp = reinterpret_cast<Address>(mcontext.mc_ebp);
-+#elif V8_HOST_ARCH_X64
-+  sample->pc = reinterpret_cast<Address>(mcontext.mc_rip);
-+  sample->sp = reinterpret_cast<Address>(mcontext.mc_rsp);
-+  sample->fp = reinterpret_cast<Address>(mcontext.mc_rbp);
-+#elif V8_HOST_ARCH_ARM
-+  sample->pc = reinterpret_cast<Address>(mcontext.mc_r15);
-+  sample->sp = reinterpret_cast<Address>(mcontext.mc_r13);
-+  sample->fp = reinterpret_cast<Address>(mcontext.mc_r11);
-+#elif V8_HOST_ARCH_PPC
-+  sample->pc = reinterpret_cast<Address>(mcontext.jmp_context.iar);
-+  sample->sp = reinterpret_cast<Address>(mcontext.jmp_context.gpr[1]);
-+  sample->fp = reinterpret_cast<Address>
(mcontext.jmp_context.gpr[1]); -+#endif -+ sampler->SampleStack(sample); -+ sampler->Tick(sample); -+} -+ -+ -+class SignalSender : public Thread { -+ public: -+ enum SleepInterval { -+ HALF_INTERVAL, -+ FULL_INTERVAL -+ }; -+ -+ static const int kSignalSenderStackSize = 64 * KB; -+ -+ explicit SignalSender(int interval) -+ : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)), -+ interval_(interval) {} -+ -+ static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); } -+ static void TearDown() { delete mutex_; } -+ -+ static void AddActiveSampler(Sampler* sampler) { -+ ScopedLock lock(mutex_); -+ SamplerRegistry::AddActiveSampler(sampler); -+ if (instance_ == NULL) { -+ // Install a signal handler. -+ struct sigaction sa; -+ sa.sa_sigaction = ProfilerSignalHandler; -+ sigemptyset(&sa.sa_mask); -+ sa.sa_flags = SA_RESTART | SA_SIGINFO; -+ signal_handler_installed_ = -+ (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0); -+ -+ // Start a thread that sends SIGPROF signal to VM threads. -+ instance_ = new SignalSender(sampler->interval()); -+ instance_->Start(); -+ } else { -+ ASSERT(instance_->interval_ == sampler->interval()); -+ } -+ } -+ -+ static void RemoveActiveSampler(Sampler* sampler) { -+ ScopedLock lock(mutex_); -+ SamplerRegistry::RemoveActiveSampler(sampler); -+ if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) { -+ RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_); -+ delete instance_; -+ instance_ = NULL; -+ -+ // Restore the old signal handler. -+ if (signal_handler_installed_) { -+ sigaction(SIGPROF, &old_signal_handler_, 0); -+ signal_handler_installed_ = false; -+ } -+ } -+ } -+ -+ // Implement Thread::Run(). -+ virtual void Run() { -+ SamplerRegistry::State state; -+ while ((state = SamplerRegistry::GetState()) != -+ SamplerRegistry::HAS_NO_SAMPLERS) { -+ bool cpu_profiling_enabled = -+ (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS); -+ bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled(); -+ // When CPU profiling is enabled both JavaScript and C++ code is -+ // profiled. We must not suspend. -+ if (!cpu_profiling_enabled) { -+ if (rate_limiter_.SuspendIfNecessary()) continue; -+ } -+ if (cpu_profiling_enabled && runtime_profiler_enabled) { -+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) { -+ return; -+ } -+ Sleep(HALF_INTERVAL); -+ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) { -+ return; -+ } -+ Sleep(HALF_INTERVAL); -+ } else { -+ if (cpu_profiling_enabled) { -+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, -+ this)) { -+ return; -+ } -+ } -+ if (runtime_profiler_enabled) { -+ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, -+ NULL)) { -+ return; -+ } -+ } -+ Sleep(FULL_INTERVAL); -+ } -+ } -+ } -+ -+ static void DoCpuProfile(Sampler* sampler, void* raw_sender) { -+ if (!sampler->IsProfiling()) return; -+ SignalSender* sender = reinterpret_cast(raw_sender); -+ sender->SendProfilingSignal(sampler->platform_data()->vm_tid()); -+ } -+ -+ static void DoRuntimeProfile(Sampler* sampler, void* ignored) { -+ if (!sampler->isolate()->IsInitialized()) return; -+ sampler->isolate()->runtime_profiler()->NotifyTick(); -+ } -+ -+ void SendProfilingSignal(pthread_t tid) { -+ if (!signal_handler_installed_) return; -+ pthread_kill(tid, SIGPROF); -+ } -+ -+ void Sleep(SleepInterval full_or_half) { -+ // Convert ms to us and subtract 100 us to compensate delays -+ // occuring during signal delivery. 
-+ useconds_t interval = interval_ * 1000 - 100; -+ if (full_or_half == HALF_INTERVAL) interval /= 2; -+ int result = usleep(interval); -+#ifdef DEBUG -+ if (result != 0 && errno != EINTR) { -+ fprintf(stderr, -+ "SignalSender usleep error; interval = %u, errno = %d\n", -+ interval, -+ errno); -+ ASSERT(result == 0 || errno == EINTR); -+ } -+#endif -+ USE(result); -+ } -+ -+ const int interval_; -+ RuntimeProfilerRateLimiter rate_limiter_; -+ -+ // Protects the process wide state below. -+ static Mutex* mutex_; -+ static SignalSender* instance_; -+ static bool signal_handler_installed_; -+ static struct sigaction old_signal_handler_; -+ -+ private: -+ DISALLOW_COPY_AND_ASSIGN(SignalSender); -+}; -+ -+Mutex* SignalSender::mutex_ = NULL; -+SignalSender* SignalSender::instance_ = NULL; -+struct sigaction SignalSender::old_signal_handler_; -+bool SignalSender::signal_handler_installed_ = false; -+ -+ -+void OS::SetUp() { -+ // Seed the random number generator. -+ // Convert the current time to a 64-bit integer first, before converting it -+ // to an unsigned. Going directly can cause an overflow and the seed to be -+ // set to all ones. The seed will be identical for different instances that -+ // call this setup code within the same millisecond. -+ uint64_t seed = static_cast(TimeCurrentMillis()); -+ srandom(static_cast(seed)); -+ limit_mutex = CreateMutex(); -+ SignalSender::SetUp(); -+} -+ -+ -+void OS::TearDown() { -+ SignalSender::TearDown(); -+ delete limit_mutex; -+} -+ -+ -+Sampler::Sampler(Isolate* isolate, int interval) -+ : isolate_(isolate), -+ interval_(interval), -+ profiling_(false), -+ active_(false), -+ samples_taken_(0) { -+ data_ = new PlatformData; -+} -+ -+ -+Sampler::~Sampler() { -+ ASSERT(!IsActive()); -+ delete data_; -+} -+ -+ -+void Sampler::Start() { -+ ASSERT(!IsActive()); -+ SetActive(true); -+ SignalSender::AddActiveSampler(this); -+} -+ -+ -+void Sampler::Stop() { -+ ASSERT(IsActive()); -+ SignalSender::RemoveActiveSampler(this); -+ SetActive(false); -+} -+ -+ -+} } // namespace v8::internal -diff -up v8-3.14.5.10/src/platform-freebsd.cc.ppc v8-3.14.5.10/src/platform-freebsd.cc ---- v8-3.14.5.10/src/platform-freebsd.cc.ppc 2016-06-07 14:15:45.950393247 -0400 -+++ v8-3.14.5.10/src/platform-freebsd.cc 2016-06-07 14:15:45.983393050 -0400 -@@ -693,6 +693,10 @@ static void ProfilerSignalHandler(int si - sample->pc = reinterpret_cast
<Address>(mcontext.mc_r15);
- sample->sp = reinterpret_cast<Address>(mcontext.mc_r13);
- sample->fp = reinterpret_cast<Address>(mcontext.mc_r11);
-+#elif V8_HOST_ARCH_PPC
-+ sample->pc = reinterpret_cast<Address>(mcontext.mc_r15);
-+ sample->sp = reinterpret_cast<Address>(mcontext.mc_r13);
-+ sample->fp = reinterpret_cast<Address>
(mcontext.mc_r11); - #endif - sampler->SampleStack(sample); - sampler->Tick(sample); -diff -up v8-3.14.5.10/src/platform.h.ppc v8-3.14.5.10/src/platform.h ---- v8-3.14.5.10/src/platform.h.ppc 2012-10-22 09:09:53.000000000 -0400 -+++ v8-3.14.5.10/src/platform.h 2016-06-07 14:15:45.983393050 -0400 -@@ -107,7 +107,8 @@ namespace internal { - - // Use AtomicWord for a machine-sized pointer. It is assumed that - // reads and writes of naturally aligned values of this type are atomic. --#if defined(__OpenBSD__) && defined(__i386__) -+#if !defined(V8_HOST_ARCH_64_BIT) && \ -+ ((defined(__OpenBSD__) && defined(__i386__)) || defined(_AIX)) - typedef Atomic32 AtomicWord; - #else - typedef intptr_t AtomicWord; -diff -up v8-3.14.5.10/src/platform-linux.cc.ppc v8-3.14.5.10/src/platform-linux.cc ---- v8-3.14.5.10/src/platform-linux.cc.ppc 2016-06-07 14:15:45.950393247 -0400 -+++ v8-3.14.5.10/src/platform-linux.cc 2016-06-07 14:15:45.983393050 -0400 -@@ -291,6 +291,8 @@ int OS::ActivationFrameAlignment() { - return 8; - #elif V8_TARGET_ARCH_MIPS - return 8; -+#elif V8_TARGET_ARCH_PPC -+ return 8; - #endif - // With gcc 4.4 the tree vectorization optimizer can generate code - // that requires 16 byte alignment such as movdqa on x86. -@@ -300,6 +302,7 @@ int OS::ActivationFrameAlignment() { - - void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) { - #if (defined(V8_TARGET_ARCH_ARM) && defined(__arm__)) || \ -+ (defined(V8_TARGET_ARCH_PPC) && defined(__PPC__)) || \ - (defined(V8_TARGET_ARCH_MIPS) && defined(__mips__)) - // Only use on ARM or MIPS hardware. - MemoryBarrier(); -@@ -409,6 +412,9 @@ void OS::DebugBreak() { - # endif - #elif defined(__mips__) - asm("break"); -+#elif defined(__PPC__) -+ asm("twge 2,2"); -+// asm("nop"); // roohack - nothing for now; - #else - asm("int $3"); - #endif -@@ -1034,7 +1040,9 @@ static void ProfilerSignalHandler(int si - - // Extracting the sample from the context is extremely machine dependent. - ucontext_t* ucontext = reinterpret_cast(context); -+#ifndef V8_HOST_ARCH_PPC - mcontext_t& mcontext = ucontext->uc_mcontext; -+#endif - sample->state = isolate->current_vm_state(); - #if V8_HOST_ARCH_IA32 - sample->pc = reinterpret_cast
<Address>(mcontext.gregs[REG_EIP]);
-@@ -1062,6 +1070,12 @@
- sample->pc = reinterpret_cast<Address>(mcontext.pc);
- sample->sp = reinterpret_cast<Address>(mcontext.gregs[29]);
- sample->fp = reinterpret_cast<Address>(mcontext.gregs[30]);
-+#elif V8_HOST_ARCH_PPC
-+ sample->pc = reinterpret_cast<Address>(ucontext->uc_mcontext.regs->nip);
-+ sample->sp =
-+ reinterpret_cast<Address>(ucontext->uc_mcontext.regs->gpr[PT_R1]);
-+ sample->fp =
-+ reinterpret_cast<Address>
(ucontext->uc_mcontext.regs->gpr[PT_R31]); - #endif // V8_HOST_ARCH_* - sampler->SampleStack(sample); - sampler->Tick(sample); -diff -up v8-3.14.5.10/src/platform-posix.cc.ppc v8-3.14.5.10/src/platform-posix.cc ---- v8-3.14.5.10/src/platform-posix.cc.ppc 2016-06-07 14:15:45.950393247 -0400 -+++ v8-3.14.5.10/src/platform-posix.cc 2016-06-07 14:15:45.983393050 -0400 -@@ -53,6 +53,10 @@ - #include - #endif - -+#if defined(_AIX) -+#include -+#endif -+ - #include "v8.h" - - #include "codegen.h" -@@ -112,9 +116,15 @@ void* OS::GetRandomMmapAddr() { - // The range 0x20000000 - 0x60000000 is relatively unpopulated across a - // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos - // 10.6 and 10.7. -+ // The range 0x30000000 - 0xD0000000 is available on AIX; -+ // choose the upper range. - raw_addr &= 0x3ffff000; -+#ifdef _AIX -+ raw_addr += 0x90000000; -+#else - raw_addr += 0x20000000; - #endif -+#endif - return reinterpret_cast(raw_addr); - } - return NULL; -@@ -125,7 +135,17 @@ void* OS::GetRandomMmapAddr() { - // Math functions - - double modulo(double x, double y) { -+#if defined(_AIX) -+ // AIX raises an underflow exception for (Number.MIN_VALUE % Number.MAX_VALUE) -+ double result; -+ int exception; -+ feclearexcept(FE_ALL_EXCEPT); -+ result = fmod(x, y); -+ exception = fetestexcept(FE_UNDERFLOW); -+ return (exception ? x : result); -+#else - return fmod(x, y); -+#endif - } - - -@@ -147,6 +167,11 @@ UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunc - #undef MATH_FUNCTION - - -+#ifdef _AIX -+#undef NAN -+#define NAN (__builtin_nanf("")) -+#endif -+ - double OS::nan_value() { - // NAN from math.h is defined in C99 and not in POSIX. - return NAN; -diff -up v8-3.14.5.10/src/ppc/assembler-ppc.cc.ppc v8-3.14.5.10/src/ppc/assembler-ppc.cc ---- v8-3.14.5.10/src/ppc/assembler-ppc.cc.ppc 2016-06-07 14:15:45.984393044 -0400 -+++ v8-3.14.5.10/src/ppc/assembler-ppc.cc 2016-06-07 14:15:45.984393044 -0400 -@@ -0,0 +1,1881 @@ -+// Copyright (c) 1994-2006 Sun Microsystems Inc. -+// All Rights Reserved. -+// -+// Redistribution and use in source and binary forms, with or without -+// modification, are permitted provided that the following conditions -+// are met: -+// -+// - Redistributions of source code must retain the above copyright notice, -+// this list of conditions and the following disclaimer. -+// -+// - Redistribution in binary form must reproduce the above copyright -+// notice, this list of conditions and the following disclaimer in the -+// documentation and/or other materials provided with the -+// distribution. -+// -+// - Neither the name of Sun Microsystems or the names of contributors may -+// be used to endorse or promote products derived from this software without -+// specific prior written permission. -+// -+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE -+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED -+// OF THE POSSIBILITY OF SUCH DAMAGE. -+ -+// The original source code covered by the above license above has been -+// modified significantly by Google Inc. -+// Copyright 2012 the V8 project authors. All rights reserved. -+ -+// -+// Copyright IBM Corp. 2012, 2013. All rights reserved. -+// -+ -+#include "v8.h" -+ -+#if defined(V8_TARGET_ARCH_PPC) -+ -+#include "ppc/assembler-ppc-inl.h" -+#include "serialize.h" -+ -+namespace v8 { -+namespace internal { -+ -+#ifdef DEBUG -+bool CpuFeatures::initialized_ = false; -+#endif -+unsigned CpuFeatures::supported_ = 0; -+unsigned CpuFeatures::found_by_runtime_probing_ = 0; -+ -+// Get the CPU features enabled by the build. -+static unsigned CpuFeaturesImpliedByCompiler() { -+ unsigned answer = 0; -+ return answer; -+} -+ -+#if !defined(_AIX) -+// This function uses types in elf.h -+static bool is_processor(const char* p) { -+ static bool read_tried = false; -+ static char *auxv_cpu_type = NULL; -+ -+ if (!read_tried) { -+ // Open the AUXV (auxilliary vector) psuedo-file -+ int fd = open("/proc/self/auxv", O_RDONLY); -+ -+ read_tried = true; -+ if (fd != -1) { -+#if V8_TARGET_ARCH_PPC64 -+ static Elf64_auxv_t buffer[16]; -+ Elf64_auxv_t *auxv_element; -+#else -+ static Elf32_auxv_t buffer[16]; -+ Elf32_auxv_t *auxv_element; -+#endif -+ int bytes_read = 0; -+ while (bytes_read >= 0) { -+ // Read a chunk of the AUXV -+ bytes_read = read(fd, buffer, sizeof(buffer)); -+ // Locate and read the platform field of AUXV if it is in the chunk -+ for (auxv_element = buffer; -+ auxv_element+sizeof(auxv_element) <= buffer+bytes_read && -+ auxv_element->a_type != AT_NULL; -+ auxv_element++) { -+ if (auxv_element->a_type == AT_PLATFORM) { -+ /* Note: Both auxv_cpu_type and buffer are static */ -+ auxv_cpu_type = reinterpret_cast(auxv_element->a_un.a_val); -+ goto done_reading; -+ } -+ } -+ } -+ done_reading: -+ close(fd); -+ } -+ } -+ -+ if (auxv_cpu_type == NULL) { -+ return false; -+ } -+ return (strcmp(auxv_cpu_type, p) == 0); -+} -+#endif -+ -+void CpuFeatures::Probe() { -+ unsigned standard_features = static_cast( -+ OS::CpuFeaturesImpliedByPlatform()) | CpuFeaturesImpliedByCompiler(); -+ ASSERT(supported_ == 0 || supported_ == standard_features); -+#ifdef DEBUG -+ initialized_ = true; -+#endif -+ -+ // Get the features implied by the OS and the compiler settings. This is the -+ // minimal set of features which is also alowed for generated code in the -+ // snapshot. -+ supported_ |= standard_features; -+ -+ if (Serializer::enabled()) { -+ // No probing for features if we might serialize (generate snapshot). 
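-+ // A hedged sketch of how this feature bit is consumed by code
-+ // generators (assumes the stock CpuFeatures::IsSupported() query):
-+ //
-+ //   if (CpuFeatures::IsSupported(FPU)) {
-+ //     // safe to emit frim (round toward -infinity, POWER5+)
-+ //   } else {
-+ //     // emit a runtime-call fallback instead
-+ //   }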
-+ return; -+ } -+ -+ // Detect whether frim instruction is supported (POWER5+) -+ // For now we will just check for processors we know do not -+ // support it -+#if !defined(_AIX) -+ if (!is_processor("ppc970") /* G5 */ && !is_processor("ppc7450") /* G4 */) { -+ // Assume support -+ supported_ |= (1u << FPU); -+ } -+#else -+ // Fallback: assume frim is supported -- will implement processor -+ // detection for other PPC platforms in is_processor() if required -+ supported_ |= (1u << FPU); -+#endif -+} -+ -+Register ToRegister(int num) { -+ ASSERT(num >= 0 && num < kNumRegisters); -+ const Register kRegisters[] = { -+ r0, -+ sp, -+ r2, r3, r4, r5, r6, r7, r8, r9, r10, -+ r11, ip, r13, r14, r15, -+ r16, r17, r18, r19, r20, r21, r22, r23, r24, -+ r25, r26, r27, r28, r29, r30, fp -+ }; -+ return kRegisters[num]; -+} -+ -+ -+// ----------------------------------------------------------------------------- -+// Implementation of RelocInfo -+ -+const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE; -+ -+ -+bool RelocInfo::IsCodedSpecially() { -+ // The deserializer needs to know whether a pointer is specially -+ // coded. Being specially coded on PPC means that it is a lis/ori -+ // instruction sequence, and that is always the case inside code -+ // objects. -+ return true; -+} -+ -+ -+void RelocInfo::PatchCode(byte* instructions, int instruction_count) { -+ // Patch the code at the current address with the supplied instructions. -+ Instr* pc = reinterpret_cast(pc_); -+ Instr* instr = reinterpret_cast(instructions); -+ for (int i = 0; i < instruction_count; i++) { -+ *(pc + i) = *(instr + i); -+ } -+ -+ // Indicate that code has changed. -+ CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize); -+} -+ -+ -+// Patch the code at the current PC with a call to the target address. -+// Additional guard instructions can be added if required. -+void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) { -+ // Patch the code at the current address with a call to the target. -+ UNIMPLEMENTED(); -+} -+ -+ -+// ----------------------------------------------------------------------------- -+// Implementation of Operand and MemOperand -+// See assembler-ppc-inl.h for inlined constructors -+ -+Operand::Operand(Handle handle) { -+ rm_ = no_reg; -+ // Verify all Objects referred by code are NOT in new space. -+ Object* obj = *handle; -+ ASSERT(!HEAP->InNewSpace(obj)); -+ if (obj->IsHeapObject()) { -+ imm_ = reinterpret_cast(handle.location()); -+ rmode_ = RelocInfo::EMBEDDED_OBJECT; -+ } else { -+ // no relocation needed -+ imm_ = reinterpret_cast(obj); -+ rmode_ = RelocInfo::NONE; -+ } -+} -+ -+MemOperand::MemOperand(Register rn, int32_t offset) { -+ ra_ = rn; -+ rb_ = no_reg; -+ offset_ = offset; -+} -+ -+MemOperand::MemOperand(Register ra, Register rb) { -+ ra_ = ra; -+ rb_ = rb; -+ offset_ = 0; -+} -+ -+// ----------------------------------------------------------------------------- -+// Specific instructions, constants, and masks. -+ -+// Spare buffer. -+static const int kMinimalBufferSize = 4*KB; -+ -+ -+Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size) -+ : AssemblerBase(arg_isolate), -+ recorded_ast_id_(TypeFeedbackId::None()), -+ positions_recorder_(this), -+ emit_debug_code_(FLAG_debug_code), -+ predictable_code_size_(false) { -+ if (buffer == NULL) { -+ // Do our own buffer management. 
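-+ // Lifecycle sketch of the spare-buffer cache used below: a
-+ // default-constructed assembler borrows the one buffer cached on the
-+ // isolate, and ~Assembler() returns it while it is still minimal size.
-+ //
-+ //   Assembler masm(isolate, NULL, 0);  // takes the cached spare, if any
-+ //   // ... emit code ...
-+ //   // destructor: buffer_size_ == kMinimalBufferSize -> re-cached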
-+ if (buffer_size <= kMinimalBufferSize) { -+ buffer_size = kMinimalBufferSize; -+ -+ if (isolate()->assembler_spare_buffer() != NULL) { -+ buffer = isolate()->assembler_spare_buffer(); -+ isolate()->set_assembler_spare_buffer(NULL); -+ } -+ } -+ if (buffer == NULL) { -+ buffer_ = NewArray(buffer_size); -+ } else { -+ buffer_ = static_cast(buffer); -+ } -+ buffer_size_ = buffer_size; -+ own_buffer_ = true; -+ -+ } else { -+ // Use externally provided buffer instead. -+ ASSERT(buffer_size > 0); -+ buffer_ = static_cast(buffer); -+ buffer_size_ = buffer_size; -+ own_buffer_ = false; -+ } -+ -+ // Set up buffer pointers. -+ ASSERT(buffer_ != NULL); -+ pc_ = buffer_; -+ reloc_info_writer.Reposition(buffer_ + buffer_size, pc_); -+ -+ no_trampoline_pool_before_ = 0; -+ trampoline_pool_blocked_nesting_ = 0; -+ // We leave space (kMaxBlockTrampolineSectionSize) -+ // for BlockTrampolinePoolScope buffer. -+ next_buffer_check_ = kMaxCondBranchReach - kMaxBlockTrampolineSectionSize; -+ internal_trampoline_exception_ = false; -+ last_bound_pos_ = 0; -+ -+ trampoline_emitted_ = false; -+ unbound_labels_count_ = 0; -+ -+ ClearRecordedAstId(); -+} -+ -+ -+Assembler::~Assembler() { -+ if (own_buffer_) { -+ if (isolate()->assembler_spare_buffer() == NULL && -+ buffer_size_ == kMinimalBufferSize) { -+ isolate()->set_assembler_spare_buffer(buffer_); -+ } else { -+ DeleteArray(buffer_); -+ } -+ } -+} -+ -+ -+void Assembler::GetCode(CodeDesc* desc) { -+ // Set up code descriptor. -+ desc->buffer = buffer_; -+ desc->buffer_size = buffer_size_; -+ desc->instr_size = pc_offset(); -+ desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos(); -+} -+ -+ -+void Assembler::Align(int m) { -+ ASSERT(m >= 4 && IsPowerOf2(m)); -+ while ((pc_offset() & (m - 1)) != 0) { -+ nop(); -+ } -+} -+ -+ -+void Assembler::CodeTargetAlign() { -+ Align(8); -+} -+ -+ -+Condition Assembler::GetCondition(Instr instr) { -+ switch (instr & kCondMask) { -+ case BT: -+ return eq; -+ case BF: -+ return ne; -+ default: -+ UNIMPLEMENTED(); -+ } -+ return al; -+} -+ -+// PowerPC -+ -+bool Assembler::IsLis(Instr instr) { -+ return (instr & kOpcodeMask) == ADDIS; -+} -+ -+bool Assembler::IsAddic(Instr instr) { -+ return (instr & kOpcodeMask) == ADDIC; -+} -+ -+bool Assembler::IsOri(Instr instr) { -+ return (instr & kOpcodeMask) == ORI; -+} -+ -+ -+bool Assembler::IsBranch(Instr instr) { -+ return ((instr & kOpcodeMask) == BCX); -+} -+ -+// end PowerPC -+ -+Register Assembler::GetRA(Instr instr) { -+ Register reg; -+ reg.code_ = Instruction::RAValue(instr); -+ return reg; -+} -+ -+Register Assembler::GetRB(Instr instr) { -+ Register reg; -+ reg.code_ = Instruction::RBValue(instr); -+ return reg; -+} -+ -+#if V8_TARGET_ARCH_PPC64 -+// This code assumes a FIXED_SEQUENCE for 64bit loads (lis/ori) -+bool Assembler::Is64BitLoadIntoR12(Instr instr1, Instr instr2, -+ Instr instr3, Instr instr4, Instr instr5) { -+ // Check the instructions are indeed a five part load (into r12) -+ // 3d800000 lis r12, 0 -+ // 618c0000 ori r12, r12, 0 -+ // 798c07c6 rldicr r12, r12, 32, 31 -+ // 658c00c3 oris r12, r12, 195 -+ // 618ccd40 ori r12, r12, 52544 -+ return(((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c) && -+ (instr3 == 0x798c07c6) && -+ ((instr4 >> 16) == 0x658c) && ((instr5 >> 16) == 0x618c)); -+} -+#else -+// This code assumes a FIXED_SEQUENCE for 32bit loads (lis/ori) -+bool Assembler::Is32BitLoadIntoR12(Instr instr1, Instr instr2) { -+ // Check the instruction is indeed a two part load (into r12) -+ // 3d802553 lis r12, 9555 -+ // 
618c5000 ori r12, r12, 20480 -+ return(((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c)); -+} -+#endif -+ -+bool Assembler::IsCmpRegister(Instr instr) { -+ return (((instr & kOpcodeMask) == EXT2) && -+ ((instr & kExt2OpcodeMask) == CMP)); -+} -+ -+bool Assembler::IsRlwinm(Instr instr) { -+ return ((instr & kOpcodeMask) == RLWINMX); -+} -+ -+#if V8_TARGET_ARCH_PPC64 -+bool Assembler::IsRldicl(Instr instr) { -+ return (((instr & kOpcodeMask) == EXT5) && -+ ((instr & kExt5OpcodeMask) == RLDICL)); -+} -+#endif -+ -+bool Assembler::IsCmpImmediate(Instr instr) { -+ return ((instr & kOpcodeMask) == CMPI); -+} -+ -+Register Assembler::GetCmpImmediateRegister(Instr instr) { -+ ASSERT(IsCmpImmediate(instr)); -+ return GetRA(instr); -+} -+ -+int Assembler::GetCmpImmediateRawImmediate(Instr instr) { -+ ASSERT(IsCmpImmediate(instr)); -+ return instr & kOff16Mask; -+} -+ -+// Labels refer to positions in the (to be) generated code. -+// There are bound, linked, and unused labels. -+// -+// Bound labels refer to known positions in the already -+// generated code. pos() is the position the label refers to. -+// -+// Linked labels refer to unknown positions in the code -+// to be generated; pos() is the position of the last -+// instruction using the label. -+ -+ -+// The link chain is terminated by a negative code position (must be aligned) -+const int kEndOfChain = -4; -+ -+ -+int Assembler::target_at(int pos) { -+ Instr instr = instr_at(pos); -+ // check which type of branch this is 16 or 26 bit offset -+ int opcode = instr & kOpcodeMask; -+ if (BX == opcode) { -+ int imm26 = ((instr & kImm26Mask) << 6) >> 6; -+ imm26 &= ~(kAAMask|kLKMask); // discard AA|LK bits if present -+ if (imm26 == 0) -+ return kEndOfChain; -+ return pos + imm26; -+ } else if (BCX == opcode) { -+ int imm16 = SIGN_EXT_IMM16((instr & kImm16Mask)); -+ imm16 &= ~(kAAMask|kLKMask); // discard AA|LK bits if present -+ if (imm16 == 0) -+ return kEndOfChain; -+ return pos + imm16; -+ } else if ((instr & ~kImm16Mask) == 0) { -+ // Emitted label constant, not part of a branch (regexp PushBacktrack). -+ if (instr == 0) { -+ return kEndOfChain; -+ } else { -+ int32_t imm16 = SIGN_EXT_IMM16(instr); -+ return (imm16 + pos); -+ } -+ } -+ -+ PPCPORT_UNIMPLEMENTED(); -+ ASSERT(false); -+ return -1; -+} -+ -+void Assembler::target_at_put(int pos, int target_pos) { -+ Instr instr = instr_at(pos); -+ int opcode = instr & kOpcodeMask; -+ -+ // check which type of branch this is 16 or 26 bit offset -+ if (BX == opcode) { -+ int imm26 = target_pos - pos; -+ ASSERT((imm26 & (kAAMask|kLKMask)) == 0); -+ instr &= ((~kImm26Mask)|kAAMask|kLKMask); -+ ASSERT(is_int26(imm26)); -+ instr_at_put(pos, instr | (imm26 & kImm26Mask)); -+ return; -+ } else if (BCX == opcode) { -+ int imm16 = target_pos - pos; -+ ASSERT((imm16 & (kAAMask|kLKMask)) == 0); -+ instr &= ((~kImm16Mask)|kAAMask|kLKMask); -+ ASSERT(is_int16(imm16)); -+ instr_at_put(pos, instr | (imm16 & kImm16Mask)); -+ return; -+ } else if ((instr & ~kImm16Mask) == 0) { -+ ASSERT(target_pos == kEndOfChain || target_pos >= 0); -+ // Emitted label constant, not part of a branch. -+ // Make label relative to Code* of generated Code object. 
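-+ // Sketch of the rebasing done below: the stored constant turns a
-+ // buffer offset into an offset from the tagged Code pointer, so that
-+ //
-+ //   label_address = code_ptr - kHeapObjectTag  // strip the heap tag
-+ //                 + Code::kHeaderSize          // skip the Code header
-+ //                 + target_pos                 // offset of the label
-+ //
-+ // matches what the regexp PushBacktrack consumer reconstructs later.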
-+ instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag)); -+ return; -+ } -+ -+ ASSERT(false); -+} -+ -+int Assembler::max_reach_from(int pos) { -+ Instr instr = instr_at(pos); -+ int opcode = instr & kOpcodeMask; -+ -+ // check which type of branch this is 16 or 26 bit offset -+ if (BX == opcode) { -+ return 26; -+ } else if (BCX == opcode) { -+ return 16; -+ } else if ((instr & ~kImm16Mask) == 0) { -+ // Emitted label constant, not part of a branch (regexp PushBacktrack). -+ return 16; -+ } -+ -+ ASSERT(false); -+ return 0; -+} -+ -+void Assembler::bind_to(Label* L, int pos) { -+ ASSERT(0 <= pos && pos <= pc_offset()); // must have a valid binding position -+ int32_t trampoline_pos = kInvalidSlotPos; -+ if (L->is_linked() && !trampoline_emitted_) { -+ unbound_labels_count_--; -+ next_buffer_check_ += kTrampolineSlotsSize; -+ } -+ -+ while (L->is_linked()) { -+ int fixup_pos = L->pos(); -+ int32_t offset = pos - fixup_pos; -+ int maxReach = max_reach_from(fixup_pos); -+ next(L); // call next before overwriting link with target at fixup_pos -+ if (is_intn(offset, maxReach) == false) { -+ if (trampoline_pos == kInvalidSlotPos) { -+ trampoline_pos = get_trampoline_entry(); -+ CHECK(trampoline_pos != kInvalidSlotPos); -+ target_at_put(trampoline_pos, pos); -+ } -+ target_at_put(fixup_pos, trampoline_pos); -+ } else { -+ target_at_put(fixup_pos, pos); -+ } -+ } -+ L->bind_to(pos); -+ -+ // Keep track of the last bound label so we don't eliminate any instructions -+ // before a bound label. -+ if (pos > last_bound_pos_) -+ last_bound_pos_ = pos; -+} -+ -+void Assembler::bind(Label* L) { -+ ASSERT(!L->is_bound()); // label can only be bound once -+ bind_to(L, pc_offset()); -+} -+ -+ -+void Assembler::next(Label* L) { -+ ASSERT(L->is_linked()); -+ int link = target_at(L->pos()); -+ if (link == kEndOfChain) { -+ L->Unuse(); -+ } else { -+ ASSERT(link >= 0); -+ L->link_to(link); -+ } -+} -+ -+bool Assembler::is_near(Label* L, Condition cond) { -+ ASSERT(L->is_bound()); -+ if (L->is_bound() == false) -+ return false; -+ -+ int maxReach = ((cond == al) ? 
26 : 16); -+ int offset = L->pos() - pc_offset(); -+ -+ return is_intn(offset, maxReach); -+} -+ -+void Assembler::a_form(Instr instr, -+ DwVfpRegister frt, -+ DwVfpRegister fra, -+ DwVfpRegister frb, -+ RCBit r) { -+ emit(instr | frt.code()*B21 | fra.code()*B16 | frb.code()*B11 | r); -+} -+ -+void Assembler::d_form(Instr instr, -+ Register rt, -+ Register ra, -+ const intptr_t val, -+ bool signed_disp) { -+ if (signed_disp) { -+ if (!is_int16(val)) { -+ PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR "\n", val, val); -+ } -+ ASSERT(is_int16(val)); -+ } else { -+ if (!is_uint16(val)) { -+ PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR -+ ", is_unsigned_imm16(val)=%d, kImm16Mask=0x%x\n", -+ val, val, is_uint16(val), kImm16Mask); -+ } -+ ASSERT(is_uint16(val)); -+ } -+ emit(instr | rt.code()*B21 | ra.code()*B16 | (kImm16Mask & val)); -+} -+ -+void Assembler::x_form(Instr instr, -+ Register ra, -+ Register rs, -+ Register rb, -+ RCBit r) { -+ emit(instr | rs.code()*B21 | ra.code()*B16 | rb.code()*B11 | r); -+} -+ -+void Assembler::xo_form(Instr instr, -+ Register rt, -+ Register ra, -+ Register rb, -+ OEBit o, -+ RCBit r) { -+ emit(instr | rt.code()*B21 | ra.code()*B16 | rb.code()*B11 | o | r); -+} -+ -+void Assembler::md_form(Instr instr, -+ Register ra, -+ Register rs, -+ int shift, -+ int maskbit, -+ RCBit r) { -+ int sh0_4 = shift & 0x1f; -+ int sh5 = (shift >> 5) & 0x1; -+ int m0_4 = maskbit & 0x1f; -+ int m5 = (maskbit >> 5) & 0x1; -+ -+ emit(instr | rs.code()*B21 | ra.code()*B16 | -+ sh0_4*B11 | m0_4*B6 | m5*B5 | sh5*B1 | r); -+} -+ -+// Returns the next free trampoline entry. -+int32_t Assembler::get_trampoline_entry() { -+ int32_t trampoline_entry = kInvalidSlotPos; -+ -+ if (!internal_trampoline_exception_) { -+ trampoline_entry = trampoline_.take_slot(); -+ -+ if (kInvalidSlotPos == trampoline_entry) { -+ internal_trampoline_exception_ = true; -+ } -+ } -+ return trampoline_entry; -+} -+ -+int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) { -+ int target_pos; -+ if (L->is_bound()) { -+ target_pos = L->pos(); -+ } else { -+ if (L->is_linked()) { -+ target_pos = L->pos(); // L's link -+ } else { -+ // was: target_pos = kEndOfChain; -+ // However, using branch to self to mark the first reference -+ // should avoid most instances of branch offset overflow. See -+ // target_at() for where this is converted back to kEndOfChain. -+ target_pos = pc_offset(); -+ if (!trampoline_emitted_) { -+ unbound_labels_count_++; -+ next_buffer_check_ -= kTrampolineSlotsSize; -+ } -+ } -+ L->link_to(pc_offset()); -+ } -+ -+ return target_pos - pc_offset(); -+} -+ -+ -+void Assembler::label_at_put(Label* L, int at_offset) { -+ int target_pos; -+ if (L->is_bound()) { -+ target_pos = L->pos(); -+ instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag)); -+ } else { -+ if (L->is_linked()) { -+ target_pos = L->pos(); // L's link -+ } else { -+ // was: target_pos = kEndOfChain; -+ // However, using branch to self to mark the first reference -+ // should avoid most instances of branch offset overflow. See -+ // target_at() for where this is converted back to kEndOfChain. -+ target_pos = at_offset; -+ if (!trampoline_emitted_) { -+ unbound_labels_count_++; -+ next_buffer_check_ -= kTrampolineSlotsSize; -+ } -+ } -+ L->link_to(at_offset); -+ -+ Instr constant = target_pos - at_offset; -+ ASSERT(is_int16(constant)); -+ instr_at_put(at_offset, constant); -+ } -+} -+ -+ -+// Branch instructions. 
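-+// PPC offers two relative encodings: b (BX) carries a signed 26-bit byte
-+// offset (+/-32MB reach) while bc (BCX) carries a signed 16-bit one
-+// (+/-32KB); bclr/bcctr take no offset and go through LR/CTR. A hedged
-+// usage sketch of the emitters below (condition_bit 2 is CR0[EQ] in the
-+// architected CR layout):
-+//
-+//   b(branch_offset(&target, false), LeaveLK);  // 26-bit unconditional
-+//   bc(offset16, BT, 2, LeaveLK);               // taken when cr0.eq set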
-+ -+// PowerPC -+void Assembler::bclr(BOfield bo, LKBit lk) { -+ positions_recorder()->WriteRecordedPositions(); -+ emit(EXT1 | bo | BCLRX | lk); -+} -+ -+void Assembler::bcctr(BOfield bo, LKBit lk) { -+ positions_recorder()->WriteRecordedPositions(); -+ emit(EXT1 | bo | BCCTRX | lk); -+} -+ -+// Pseudo op - branch to link register -+void Assembler::blr() { -+ bclr(BA, LeaveLK); -+} -+ -+// Pseudo op - branch to count register -- used for "jump" -+void Assembler::bcr() { -+ bcctr(BA, LeaveLK); -+} -+ -+void Assembler::bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk) { -+ positions_recorder()->WriteRecordedPositions(); -+ ASSERT(is_int16(branch_offset)); -+ emit(BCX | bo | condition_bit*B16 | (kImm16Mask & branch_offset) | lk); -+} -+ -+void Assembler::b(int branch_offset, LKBit lk) { -+ positions_recorder()->WriteRecordedPositions(); -+ ASSERT((branch_offset & 3) == 0); -+ int imm26 = branch_offset; -+ ASSERT(is_int26(imm26)); -+ // todo add AA and LK bits -+ emit(BX | (imm26 & kImm26Mask) | lk); -+} -+ -+void Assembler::xori(Register dst, Register src, const Operand& imm) { -+ d_form(XORI, src, dst, imm.imm_, false); -+} -+ -+void Assembler::xoris(Register ra, Register rs, const Operand& imm) { -+ d_form(XORIS, rs, ra, imm.imm_, false); -+} -+ -+void Assembler::xor_(Register dst, Register src1, Register src2, RCBit rc) { -+ x_form(EXT2 | XORX, dst, src1, src2, rc); -+} -+ -+void Assembler::cntlzw_(Register ra, Register rs, RCBit rc) { -+ x_form(EXT2 | CNTLZWX, ra, rs, r0, rc); -+} -+ -+void Assembler::and_(Register ra, Register rs, Register rb, RCBit rc) { -+ x_form(EXT2 | ANDX, ra, rs, rb, rc); -+} -+ -+ -+void Assembler::rlwinm(Register ra, Register rs, -+ int sh, int mb, int me, RCBit rc) { -+ sh &= 0x1f; -+ mb &= 0x1f; -+ me &= 0x1f; -+ emit(RLWINMX | rs.code()*B21 | ra.code()*B16 | sh*B11 | mb*B6 | me << 1 | rc); -+} -+ -+void Assembler::rlwimi(Register ra, Register rs, -+ int sh, int mb, int me, RCBit rc) { -+ sh &= 0x1f; -+ mb &= 0x1f; -+ me &= 0x1f; -+ emit(RLWIMIX | rs.code()*B21 | ra.code()*B16 | sh*B11 | mb*B6 | me << 1 | rc); -+} -+ -+void Assembler::slwi(Register dst, Register src, const Operand& val, -+ RCBit rc) { -+ ASSERT((32 > val.imm_)&&(val.imm_ >= 0)); -+ rlwinm(dst, src, val.imm_, 0, 31-val.imm_, rc); -+} -+void Assembler::srwi(Register dst, Register src, const Operand& val, -+ RCBit rc) { -+ ASSERT((32 > val.imm_)&&(val.imm_ >= 0)); -+ rlwinm(dst, src, 32-val.imm_, val.imm_, 31, rc); -+} -+void Assembler::clrrwi(Register dst, Register src, const Operand& val, -+ RCBit rc) { -+ ASSERT((32 > val.imm_)&&(val.imm_ >= 0)); -+ rlwinm(dst, src, 0, 0, 31-val.imm_, rc); -+} -+void Assembler::clrlwi(Register dst, Register src, const Operand& val, -+ RCBit rc) { -+ ASSERT((32 > val.imm_)&&(val.imm_ >= 0)); -+ rlwinm(dst, src, 0, val.imm_, 31, rc); -+} -+ -+ -+void Assembler::srawi(Register ra, Register rs, int sh, RCBit r) { -+ emit(EXT2 | SRAWIX | rs.code()*B21 | ra.code()*B16 | sh*B11 | r); -+} -+ -+void Assembler::srw(Register dst, Register src1, Register src2, RCBit r) { -+ x_form(EXT2 | SRWX, dst, src1, src2, r); -+} -+ -+void Assembler::slw(Register dst, Register src1, Register src2, RCBit r) { -+ x_form(EXT2 | SLWX, dst, src1, src2, r); -+} -+ -+void Assembler::sraw(Register ra, Register rs, Register rb, RCBit r) { -+ x_form(EXT2 | SRAW, ra, rs, rb, r); -+} -+ -+void Assembler::subi(Register dst, Register src, const Operand& imm) { -+ addi(dst, src, Operand(-(imm.imm_))); -+} -+ -+void Assembler::addc(Register dst, Register src1, Register src2, -+ OEBit 
o, RCBit r) { -+ xo_form(EXT2 | ADDCX, dst, src1, src2, o, r); -+} -+ -+void Assembler::addze(Register dst, Register src1, OEBit o, RCBit r) { -+ // a special xo_form -+ emit(EXT2 | ADDZEX | dst.code()*B21 | src1.code()*B16 | o | r); -+} -+ -+void Assembler::sub(Register dst, Register src1, Register src2, -+ OEBit o, RCBit r) { -+ xo_form(EXT2 | SUBFX, dst, src2, src1, o, r); -+} -+ -+void Assembler::subfc(Register dst, Register src1, Register src2, -+ OEBit o, RCBit r) { -+ xo_form(EXT2 | SUBFCX, dst, src2, src1, o, r); -+} -+ -+void Assembler::subfic(Register dst, Register src, const Operand& imm) { -+ d_form(SUBFIC, dst, src, imm.imm_, true); -+} -+ -+void Assembler::add(Register dst, Register src1, Register src2, -+ OEBit o, RCBit r) { -+ xo_form(EXT2 | ADDX, dst, src1, src2, o, r); -+} -+ -+// Multiply low word -+void Assembler::mullw(Register dst, Register src1, Register src2, -+ OEBit o, RCBit r) { -+ xo_form(EXT2 | MULLW, dst, src1, src2, o, r); -+} -+ -+// Multiply hi word -+void Assembler::mulhw(Register dst, Register src1, Register src2, -+ OEBit o, RCBit r) { -+ xo_form(EXT2 | MULHWX, dst, src1, src2, o, r); -+} -+ -+// Divide word -+void Assembler::divw(Register dst, Register src1, Register src2, -+ OEBit o, RCBit r) { -+ xo_form(EXT2 | DIVW, dst, src1, src2, o, r); -+} -+ -+void Assembler::addi(Register dst, Register src, const Operand& imm) { -+ ASSERT(!src.is(r0)); // use li instead to show intent -+ d_form(ADDI, dst, src, imm.imm_, true); -+} -+ -+void Assembler::addis(Register dst, Register src, const Operand& imm) { -+ ASSERT(!src.is(r0)); // use lis instead to show intent -+ d_form(ADDIS, dst, src, imm.imm_, true); -+} -+ -+void Assembler::addic(Register dst, Register src, const Operand& imm) { -+ d_form(ADDIC, dst, src, imm.imm_, true); -+} -+ -+void Assembler::andi(Register ra, Register rs, const Operand& imm) { -+ d_form(ANDIx, rs, ra, imm.imm_, false); -+} -+ -+void Assembler::andis(Register ra, Register rs, const Operand& imm) { -+ d_form(ANDISx, rs, ra, imm.imm_, false); -+} -+ -+void Assembler::nor(Register dst, Register src1, Register src2, RCBit r) { -+ x_form(EXT2 | NORX, dst, src1, src2, r); -+} -+ -+void Assembler::notx(Register dst, Register src, RCBit r) { -+ x_form(EXT2 | NORX, dst, src, src, r); -+} -+ -+void Assembler::ori(Register ra, Register rs, const Operand& imm) { -+ d_form(ORI, rs, ra, imm.imm_, false); -+} -+ -+void Assembler::oris(Register dst, Register src, const Operand& imm) { -+ d_form(ORIS, src, dst, imm.imm_, false); -+} -+ -+void Assembler::orx(Register dst, Register src1, Register src2, RCBit rc) { -+ x_form(EXT2 | ORX, dst, src1, src2, rc); -+} -+ -+void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) { -+ intptr_t imm16 = src2.imm_; -+#if V8_TARGET_ARCH_PPC64 -+ int L = 1; -+#else -+ int L = 0; -+#endif -+ ASSERT(is_int16(imm16)); -+ ASSERT(cr.code() >= 0 && cr.code() <= 7); -+ imm16 &= kImm16Mask; -+ emit(CMPI | cr.code()*B23 | L*B21 | src1.code()*B16 | imm16); -+} -+ -+void Assembler::cmpli(Register src1, const Operand& src2, CRegister cr) { -+ uintptr_t uimm16 = src2.imm_; -+#if V8_TARGET_ARCH_PPC64 -+ int L = 1; -+#else -+ int L = 0; -+#endif -+ ASSERT(is_uint16(uimm16)); -+ ASSERT(cr.code() >= 0 && cr.code() <= 7); -+ uimm16 &= kImm16Mask; -+ emit(CMPLI | cr.code()*B23 | L*B21 | src1.code()*B16 | uimm16); -+} -+ -+void Assembler::cmp(Register src1, Register src2, CRegister cr) { -+#if V8_TARGET_ARCH_PPC64 -+ int L = 1; -+#else -+ int L = 0; -+#endif -+ ASSERT(cr.code() >= 0 && cr.code() <= 7); -+ emit(EXT2 | CMP 
| cr.code()*B23 | L*B21 | src1.code()*B16 | -+ src2.code()*B11); -+} -+ -+void Assembler::cmpl(Register src1, Register src2, CRegister cr) { -+#if V8_TARGET_ARCH_PPC64 -+ int L = 1; -+#else -+ int L = 0; -+#endif -+ ASSERT(cr.code() >= 0 && cr.code() <= 7); -+ emit(EXT2 | CMPL | cr.code()*B23 | L*B21 | src1.code()*B16 | -+ src2.code()*B11); -+} -+ -+// Pseudo op - load immediate -+void Assembler::li(Register dst, const Operand &imm) { -+ d_form(ADDI, dst, r0, imm.imm_, true); -+} -+ -+void Assembler::lis(Register dst, const Operand& imm) { -+ d_form(ADDIS, dst, r0, imm.imm_, true); -+} -+ -+// Pseudo op - move register -+void Assembler::mr(Register dst, Register src) { -+ // actually or(dst, src, src) -+ orx(dst, src, src); -+} -+ -+void Assembler::lbz(Register dst, const MemOperand &src) { -+ ASSERT(!src.ra_.is(r0)); -+ d_form(LBZ, dst, src.ra(), src.offset(), true); -+} -+ -+void Assembler::lbzx(Register rt, const MemOperand &src) { -+ Register ra = src.ra(); -+ Register rb = src.rb(); -+ ASSERT(!ra.is(r0)); -+ emit(EXT2 | LBZX | rt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); -+} -+ -+void Assembler::lbzux(Register rt, const MemOperand & src) { -+ Register ra = src.ra(); -+ Register rb = src.rb(); -+ ASSERT(!ra.is(r0)); -+ emit(EXT2 | LBZUX | rt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); -+} -+ -+void Assembler::lhz(Register dst, const MemOperand &src) { -+ ASSERT(!src.ra_.is(r0)); -+ d_form(LHZ, dst, src.ra(), src.offset(), true); -+} -+ -+void Assembler::lhzx(Register rt, const MemOperand &src) { -+ Register ra = src.ra(); -+ Register rb = src.rb(); -+ ASSERT(!ra.is(r0)); -+ emit(EXT2 | LHZX | rt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); -+} -+ -+void Assembler::lhzux(Register rt, const MemOperand & src) { -+ Register ra = src.ra(); -+ Register rb = src.rb(); -+ ASSERT(!ra.is(r0)); -+ emit(EXT2 | LHZUX | rt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); -+} -+ -+void Assembler::lwz(Register dst, const MemOperand &src) { -+ ASSERT(!src.ra_.is(r0)); -+ d_form(LWZ, dst, src.ra(), src.offset(), true); -+} -+ -+void Assembler::lwzu(Register dst, const MemOperand &src) { -+ ASSERT(!src.ra_.is(r0)); -+ d_form(LWZU, dst, src.ra(), src.offset(), true); -+} -+ -+void Assembler::lwzx(Register rt, const MemOperand &src) { -+ Register ra = src.ra(); -+ Register rb = src.rb(); -+ ASSERT(!ra.is(r0)); -+ emit(EXT2 | LWZX | rt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); -+} -+ -+void Assembler::lwzux(Register rt, const MemOperand & src) { -+ Register ra = src.ra(); -+ Register rb = src.rb(); -+ ASSERT(!ra.is(r0)); -+ emit(EXT2 | LWZUX | rt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); -+} -+ -+void Assembler::lwa(Register dst, const MemOperand &src) { -+#if V8_TARGET_ARCH_PPC64 -+ int offset = src.offset(); -+ ASSERT(!src.ra_.is(r0)); -+ ASSERT(!(offset & 3) && is_int16(offset)); -+ offset = kImm16Mask & offset; -+ emit(LD | dst.code()*B21 | src.ra().code()*B16 | offset | 2); -+#else -+ lwz(dst, src); -+#endif -+} -+ -+void Assembler::stb(Register dst, const MemOperand &src) { -+ ASSERT(!src.ra_.is(r0)); -+ d_form(STB, dst, src.ra(), src.offset(), true); -+} -+ -+void Assembler::stbx(Register rs, const MemOperand &src) { -+ Register ra = src.ra(); -+ Register rb = src.rb(); -+ ASSERT(!ra.is(r0)); -+ emit(EXT2 | STBX | rs.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); -+} -+ -+void Assembler::stbux(Register rs, const MemOperand &src) { -+ Register ra = src.ra(); -+ Register rb = src.rb(); -+ ASSERT(!ra.is(r0)); -+ emit(EXT2 | STBUX 
| rs.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); -+} -+ -+void Assembler::sth(Register dst, const MemOperand &src) { -+ ASSERT(!src.ra_.is(r0)); -+ d_form(STH, dst, src.ra(), src.offset(), true); -+} -+ -+void Assembler::sthx(Register rs, const MemOperand &src) { -+ Register ra = src.ra(); -+ Register rb = src.rb(); -+ ASSERT(!ra.is(r0)); -+ emit(EXT2 | STHX | rs.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); -+} -+ -+void Assembler::sthux(Register rs, const MemOperand &src) { -+ Register ra = src.ra(); -+ Register rb = src.rb(); -+ ASSERT(!ra.is(r0)); -+ emit(EXT2 | STHUX | rs.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); -+} -+ -+void Assembler::stw(Register dst, const MemOperand &src) { -+ ASSERT(!src.ra_.is(r0)); -+ d_form(STW, dst, src.ra(), src.offset(), true); -+} -+ -+void Assembler::stwu(Register dst, const MemOperand &src) { -+ ASSERT(!src.ra_.is(r0)); -+ d_form(STWU, dst, src.ra(), src.offset(), true); -+} -+ -+void Assembler::stwx(Register rs, const MemOperand &src) { -+ Register ra = src.ra(); -+ Register rb = src.rb(); -+ ASSERT(!ra.is(r0)); -+ emit(EXT2 | STWX | rs.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); -+} -+ -+void Assembler::stwux(Register rs, const MemOperand &src) { -+ Register ra = src.ra(); -+ Register rb = src.rb(); -+ ASSERT(!ra.is(r0)); -+ emit(EXT2 | STWUX | rs.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); -+} -+ -+void Assembler::extsb(Register rs, Register ra, RCBit rc) { -+ emit(EXT2 | EXTSB | ra.code()*B21 | rs.code()*B16 | rc); -+} -+ -+void Assembler::extsh(Register rs, Register ra, RCBit rc) { -+ emit(EXT2 | EXTSH | ra.code()*B21 | rs.code()*B16 | rc); -+} -+ -+void Assembler::neg(Register rt, Register ra, OEBit o, RCBit r) { -+ emit(EXT2 | NEGX | rt.code()*B21 | ra.code()*B16 | o | r); -+} -+ -+void Assembler::andc(Register dst, Register src1, Register src2, RCBit rc) { -+ x_form(EXT2 | ANDCX, dst, src1, src2, rc); -+} -+ -+#if V8_TARGET_ARCH_PPC64 -+// 64bit specific instructions -+void Assembler::ld(Register rd, const MemOperand &src) { -+ int offset = src.offset(); -+ ASSERT(!src.ra_.is(r0)); -+ ASSERT(!(offset & 3) && is_int16(offset)); -+ offset = kImm16Mask & offset; -+ emit(LD | rd.code()*B21 | src.ra().code()*B16 | offset); -+} -+ -+void Assembler::ldx(Register rd, const MemOperand &src) { -+ Register ra = src.ra(); -+ Register rb = src.rb(); -+ ASSERT(!ra.is(r0)); -+ emit(EXT2 | LDX | rd.code()*B21 | ra.code()*B16 | rb.code()*B11); -+} -+ -+void Assembler::ldu(Register rd, const MemOperand &src) { -+ int offset = src.offset(); -+ ASSERT(!src.ra_.is(r0)); -+ ASSERT(!(offset & 3) && is_int16(offset)); -+ offset = kImm16Mask & offset; -+ emit(LD | rd.code()*B21 | src.ra().code()*B16 | offset | 1); -+} -+ -+void Assembler::ldux(Register rd, const MemOperand &src) { -+ Register ra = src.ra(); -+ Register rb = src.rb(); -+ ASSERT(!ra.is(r0)); -+ emit(EXT2 | LDUX | rd.code()*B21 | ra.code()*B16 | rb.code()*B11); -+} -+ -+void Assembler::std(Register rs, const MemOperand &src) { -+ int offset = src.offset(); -+ ASSERT(!src.ra_.is(r0)); -+ ASSERT(!(offset & 3) && is_int16(offset)); -+ offset = kImm16Mask & offset; -+ emit(STD | rs.code()*B21 | src.ra().code()*B16 | offset); -+} -+ -+void Assembler::stdx(Register rs, const MemOperand &src) { -+ Register ra = src.ra(); -+ Register rb = src.rb(); -+ ASSERT(!ra.is(r0)); -+ emit(EXT2 | STDX | rs.code()*B21 | ra.code()*B16 | rb.code()*B11); -+} -+ -+void Assembler::stdu(Register rs, const MemOperand &src) { -+ int offset = src.offset(); -+ 
ASSERT(!src.ra_.is(r0)); -+ ASSERT(!(offset & 3) && is_int16(offset)); -+ offset = kImm16Mask & offset; -+ emit(STD | rs.code()*B21 | src.ra().code()*B16 | offset | 1); -+} -+ -+void Assembler::stdux(Register rs, const MemOperand &src) { -+ Register ra = src.ra(); -+ Register rb = src.rb(); -+ ASSERT(!ra.is(r0)); -+ emit(EXT2 | STDUX | rs.code()*B21 | ra.code()*B16 | rb.code()*B11); -+} -+ -+void Assembler::rldic(Register ra, Register rs, int sh, int mb, RCBit r) { -+ md_form(EXT5 | RLDIC, ra, rs, sh, mb, r); -+} -+ -+void Assembler::rldicl(Register ra, Register rs, int sh, int mb, RCBit r) { -+ md_form(EXT5 | RLDICL, ra, rs, sh, mb, r); -+} -+ -+void Assembler::rldicr(Register ra, Register rs, int sh, int me, RCBit r) { -+ md_form(EXT5 | RLDICR, ra, rs, sh, me, r); -+} -+ -+void Assembler::sldi(Register dst, Register src, const Operand& val, -+ RCBit rc) { -+ ASSERT((64 > val.imm_)&&(val.imm_ >= 0)); -+ rldicr(dst, src, val.imm_, 63-val.imm_, rc); -+} -+void Assembler::srdi(Register dst, Register src, const Operand& val, -+ RCBit rc) { -+ ASSERT((64 > val.imm_)&&(val.imm_ >= 0)); -+ rldicl(dst, src, 64-val.imm_, val.imm_, rc); -+} -+void Assembler::clrrdi(Register dst, Register src, const Operand& val, -+ RCBit rc) { -+ ASSERT((64 > val.imm_)&&(val.imm_ >= 0)); -+ rldicr(dst, src, 0, 63-val.imm_, rc); -+} -+void Assembler::clrldi(Register dst, Register src, const Operand& val, -+ RCBit rc) { -+ ASSERT((64 > val.imm_)&&(val.imm_ >= 0)); -+ rldicl(dst, src, 0, val.imm_, rc); -+} -+ -+ -+void Assembler::rldimi(Register ra, Register rs, int sh, int mb, RCBit r) { -+ md_form(EXT5 | RLDIMI, ra, rs, sh, mb, r); -+} -+ -+ -+void Assembler::sradi(Register ra, Register rs, int sh, RCBit r) { -+ int sh0_4 = sh & 0x1f; -+ int sh5 = (sh >> 5) & 0x1; -+ -+ emit(EXT2 | SRADIX | rs.code()*B21 | ra.code()*B16 | sh0_4*B11 | sh5*B1 | r); -+} -+ -+void Assembler::srd(Register dst, Register src1, Register src2, RCBit r) { -+ x_form(EXT2 | SRDX, dst, src1, src2, r); -+} -+ -+void Assembler::sld(Register dst, Register src1, Register src2, RCBit r) { -+ x_form(EXT2 | SLDX, dst, src1, src2, r); -+} -+ -+void Assembler::srad(Register ra, Register rs, Register rb, RCBit r) { -+ x_form(EXT2 | SRAD, ra, rs, rb, r); -+} -+ -+void Assembler::cntlzd_(Register ra, Register rs, RCBit rc) { -+ x_form(EXT2 | CNTLZDX, ra, rs, r0, rc); -+} -+ -+void Assembler::extsw(Register rs, Register ra, RCBit rc) { -+ emit(EXT2 | EXTSW | ra.code()*B21 | rs.code()*B16 | rc); -+} -+ -+void Assembler::mulld(Register dst, Register src1, Register src2, -+ OEBit o, RCBit r) { -+ xo_form(EXT2 | MULLD, dst, src1, src2, o, r); -+} -+ -+void Assembler::divd(Register dst, Register src1, Register src2, -+ OEBit o, RCBit r) { -+ xo_form(EXT2 | DIVD, dst, src1, src2, o, r); -+} -+#endif -+ -+ -+void Assembler::fake_asm(enum FAKE_OPCODE_T fopcode) { -+ ASSERT(fopcode < fLastFaker); -+ emit(FAKE_OPCODE | FAKER_SUBOPCODE | fopcode); -+} -+ -+void Assembler::marker_asm(int mcode) { -+ if (::v8::internal::FLAG_trace_sim_stubs) { -+ ASSERT(mcode < F_NEXT_AVAILABLE_STUB_MARKER); -+ emit(FAKE_OPCODE | MARKER_SUBOPCODE | mcode); -+ } -+} -+ -+// Function descriptor for AIX. -+// Code address skips the function descriptor "header". -+// TOC and static chain are ignored and set to 0. 
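-+// Layout sketch of what function_descriptor() emits -- three
-+// pointer-sized slots:
-+//   [0] entry point = pc_ + 3 * kPointerSize (first real instruction)
-+//   [1] TOC base    = 0 (unused by this generated code)
-+//   [2] environment = 0 (static chain, unused)
-+// A caller typically loads slot 0 and branches through CTR (mtctr/bcctr).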
-+void Assembler::function_descriptor() { -+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE); -+#if V8_TARGET_ARCH_PPC64 -+ uint64_t value = reinterpret_cast(pc_) + 3 * kPointerSize; -+#if __BYTE_ORDER == __LITTLE_ENDIAN -+ emit(static_cast(value & 0xFFFFFFFF)); -+ emit(static_cast(value >> 32)); -+#else -+ emit(static_cast(value >> 32)); -+ emit(static_cast(value & 0xFFFFFFFF)); -+#endif -+ emit(static_cast(0)); -+ emit(static_cast(0)); -+ emit(static_cast(0)); -+ emit(static_cast(0)); -+#else -+ emit(reinterpret_cast(pc_) + 3 * kPointerSize); -+ emit(static_cast(0)); -+ emit(static_cast(0)); -+#endif -+} -+// end PowerPC -+ -+// Primarily used for loading constants -+// This should really move to be in macro-assembler as it -+// is really a pseudo instruction -+// Some usages of this intend for a FIXED_SEQUENCE to be used -+// Todo - break this dependency so we can optimize mov() in general -+// and only use the generic version when we require a fixed sequence -+void Assembler::mov(Register dst, const Operand& src) { -+ BlockTrampolinePoolScope block_trampoline_pool(this); -+ if (src.rmode_ != RelocInfo::NONE) { -+ // some form of relocation needed -+ RecordRelocInfo(src.rmode_, src.imm_); -+ } -+ -+#if V8_TARGET_ARCH_PPC64 -+ int64_t value = src.immediate(); -+ int32_t hi_32 = static_cast(value) >> 32; -+ int32_t lo_32 = static_cast(value); -+ int hi_word = static_cast(hi_32) >> 16; -+ int lo_word = static_cast(hi_32) & 0xFFFF; -+ lis(dst, Operand(SIGN_EXT_IMM16(hi_word))); -+ ori(dst, dst, Operand(lo_word)); -+ sldi(dst, dst, Operand(32)); -+ hi_word = (static_cast(lo_32) >> 16) & 0xFFFF; -+ lo_word = static_cast(lo_32) & 0xFFFF; -+ oris(dst, dst, Operand(hi_word)); -+ ori(dst, dst, Operand(lo_word)); -+#else -+ int value = src.immediate(); -+ if (!is_trampoline_pool_blocked()) { -+ if (is_int16(value)) { -+ li(dst, Operand(value)); -+ return; -+ } -+ } -+ int hi_word = static_cast(value) >> 16; -+ int lo_word = static_cast(value) & 0XFFFF; -+ -+ lis(dst, Operand(SIGN_EXT_IMM16(hi_word))); -+ if ((!is_trampoline_pool_blocked()) && (lo_word == 0)) { -+ return; -+ } -+ ori(dst, dst, Operand(lo_word)); -+#endif -+} -+ -+// Special register instructions -+void Assembler::crxor(int bt, int ba, int bb) { -+ emit(EXT1 | CRXOR | bt*B21 | ba*B16 | bb*B11); -+} -+ -+void Assembler::mflr(Register dst) { -+ emit(EXT2 | MFSPR | dst.code()*B21 | 256 << 11); // Ignore RC bit -+} -+ -+void Assembler::mtlr(Register src) { -+ emit(EXT2 | MTSPR | src.code()*B21 | 256 << 11); // Ignore RC bit -+} -+ -+void Assembler::mtctr(Register src) { -+ emit(EXT2 | MTSPR | src.code()*B21 | 288 << 11); // Ignore RC bit -+} -+ -+void Assembler::mtxer(Register src) { -+ emit(EXT2 | MTSPR | src.code()*B21 | 32 << 11); -+} -+ -+void Assembler::mcrfs(int bf, int bfa) { -+ emit(EXT4 | MCRFS | bf*B23 | bfa*B18); -+} -+ -+void Assembler::mfcr(Register dst) { -+ emit(EXT2 | MFCR | dst.code()*B21); -+} -+ -+// end PowerPC -+ -+// Exception-generating instructions and debugging support. -+// Stops with a non-negative code less than kNumOfWatchedStops support -+// enabling/disabling and a counter feature. See simulator-ppc.h . 
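-+// On real hardware the msg/code/counter machinery is unavailable, so
-+// stop() degenerates to the bkpt(0) trap word. Hypothetical call site
-+// (assuming the usual default arguments from the header):
-+//
-+//   __ stop("unexpected frame type");  // unconditional trap when reached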
-+void Assembler::stop(const char* msg, Condition cond, int32_t code, -+ CRegister cr) { -+ if (cond != al) { -+ Label skip; -+ b(NegateCondition(cond), &skip, cr); -+ bkpt(0); -+ bind(&skip); -+ } else { -+ bkpt(0); -+ } -+} -+ -+void Assembler::bkpt(uint32_t imm16) { -+ emit(0x7d821008); -+} -+ -+ -+void Assembler::info(const char* msg, Condition cond, int32_t code, -+ CRegister cr) { -+ if (::v8::internal::FLAG_trace_sim_stubs) { -+ emit(0x7d9ff808); -+#if V8_TARGET_ARCH_PPC64 -+ uint64_t value = reinterpret_cast(msg); -+ emit(static_cast(value >> 32)); -+ emit(static_cast(value & 0xFFFFFFFF)); -+#else -+ emit(reinterpret_cast(msg)); -+#endif -+ } -+} -+ -+void Assembler::dcbf(Register ra, Register rb) { -+ emit(EXT2 | DCBF | ra.code()*B16 | rb.code()*B11); -+} -+ -+void Assembler::sync() { -+ emit(EXT2 | SYNC); -+} -+ -+void Assembler::icbi(Register ra, Register rb) { -+ emit(EXT2 | ICBI | ra.code()*B16 | rb.code()*B11); -+} -+ -+void Assembler::isync() { -+ emit(EXT1 | ISYNC); -+} -+ -+// Floating point support -+ -+void Assembler::lfd(const DwVfpRegister frt, const MemOperand &src) { -+ int offset = src.offset(); -+ Register ra = src.ra(); -+ ASSERT(is_int16(offset)); -+ int imm16 = offset & kImm16Mask; -+ // could be x_form instruction with some casting magic -+ emit(LFD | frt.code()*B21 | ra.code()*B16 | imm16); -+} -+ -+void Assembler::lfdu(const DwVfpRegister frt, const MemOperand &src) { -+ int offset = src.offset(); -+ Register ra = src.ra(); -+ ASSERT(is_int16(offset)); -+ int imm16 = offset & kImm16Mask; -+ // could be x_form instruction with some casting magic -+ emit(LFDU | frt.code()*B21 | ra.code()*B16 | imm16); -+} -+ -+void Assembler::lfdx(const DwVfpRegister frt, const MemOperand &src) { -+ Register ra = src.ra(); -+ Register rb = src.rb(); -+ ASSERT(!ra.is(r0)); -+ emit(EXT2 | LFDX | frt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); -+} -+ -+void Assembler::lfdux(const DwVfpRegister frt, const MemOperand & src) { -+ Register ra = src.ra(); -+ Register rb = src.rb(); -+ ASSERT(!ra.is(r0)); -+ emit(EXT2 | LFDUX | frt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); -+} -+ -+void Assembler::lfs(const DwVfpRegister frt, const MemOperand &src) { -+ int offset = src.offset(); -+ Register ra = src.ra(); -+ ASSERT(is_int16(offset)); -+ ASSERT(!ra.is(r0)); -+ int imm16 = offset & kImm16Mask; -+ // could be x_form instruction with some casting magic -+ emit(LFS | frt.code()*B21 | ra.code()*B16 | imm16); -+} -+ -+void Assembler::lfsu(const DwVfpRegister frt, const MemOperand &src) { -+ int offset = src.offset(); -+ Register ra = src.ra(); -+ ASSERT(is_int16(offset)); -+ ASSERT(!ra.is(r0)); -+ int imm16 = offset & kImm16Mask; -+ // could be x_form instruction with some casting magic -+ emit(LFSU | frt.code()*B21 | ra.code()*B16 | imm16); -+} -+ -+void Assembler::lfsx(const DwVfpRegister frt, const MemOperand &src) { -+ Register ra = src.ra(); -+ Register rb = src.rb(); -+ ASSERT(!ra.is(r0)); -+ emit(EXT2 | LFSX | frt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); -+} -+ -+void Assembler::lfsux(const DwVfpRegister frt, const MemOperand & src) { -+ Register ra = src.ra(); -+ Register rb = src.rb(); -+ ASSERT(!ra.is(r0)); -+ emit(EXT2 | LFSUX | frt.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); -+} -+ -+void Assembler::stfd(const DwVfpRegister frs, const MemOperand &src) { -+ int offset = src.offset(); -+ Register ra = src.ra(); -+ ASSERT(is_int16(offset)); -+ ASSERT(!ra.is(r0)); -+ int imm16 = offset & kImm16Mask; -+ // could be x_form instruction 
with some casting magic -+ emit(STFD | frs.code()*B21 | ra.code()*B16 | imm16); -+} -+ -+void Assembler::stfdu(const DwVfpRegister frs, const MemOperand &src) { -+ int offset = src.offset(); -+ Register ra = src.ra(); -+ ASSERT(is_int16(offset)); -+ ASSERT(!ra.is(r0)); -+ int imm16 = offset & kImm16Mask; -+ // could be x_form instruction with some casting magic -+ emit(STFDU | frs.code()*B21 | ra.code()*B16 | imm16); -+} -+ -+void Assembler::stfdx(const DwVfpRegister frs, const MemOperand &src) { -+ Register ra = src.ra(); -+ Register rb = src.rb(); -+ ASSERT(!ra.is(r0)); -+ emit(EXT2 | STFDX | frs.code()*B21 | ra.code()*B16 | rb.code()*B11 | LeaveRC); -+} -+ -+void Assembler::stfdux(const DwVfpRegister frs, const MemOperand &src) { -+ Register ra = src.ra(); -+ Register rb = src.rb(); -+ ASSERT(!ra.is(r0)); -+ emit(EXT2 | STFDUX | frs.code()*B21 | ra.code()*B16 | rb.code()*B11 |LeaveRC); -+} -+ -+void Assembler::stfs(const DwVfpRegister frs, const MemOperand &src) { -+ int offset = src.offset(); -+ Register ra = src.ra(); -+ ASSERT(is_int16(offset)); -+ ASSERT(!ra.is(r0)); -+ int imm16 = offset & kImm16Mask; -+ // could be x_form instruction with some casting magic -+ emit(STFS | frs.code()*B21 | ra.code()*B16 | imm16); -+} -+ -+void Assembler::stfsu(const DwVfpRegister frs, const MemOperand &src) { -+ int offset = src.offset(); -+ Register ra = src.ra(); -+ ASSERT(is_int16(offset)); -+ ASSERT(!ra.is(r0)); -+ int imm16 = offset & kImm16Mask; -+ // could be x_form instruction with some casting magic -+ emit(STFSU | frs.code()*B21 | ra.code()*B16 | imm16); -+} -+ -+void Assembler::stfsx(const DwVfpRegister frs, const MemOperand &src) { -+ Register ra = src.ra(); -+ Register rb = src.rb(); -+ ASSERT(!ra.is(r0)); -+ emit(EXT2 | STFSX | frs.code()*B21 | ra.code()*B16 | rb.code()*B11 |LeaveRC); -+} -+ -+void Assembler::stfsux(const DwVfpRegister frs, const MemOperand &src) { -+ Register ra = src.ra(); -+ Register rb = src.rb(); -+ ASSERT(!ra.is(r0)); -+ emit(EXT2 | STFSUX | frs.code()*B21 | ra.code()*B16 | rb.code()*B11 |LeaveRC); -+} -+ -+void Assembler::fsub(const DwVfpRegister frt, -+ const DwVfpRegister fra, -+ const DwVfpRegister frb, -+ RCBit rc) { -+ a_form(EXT4 | FSUB, frt, fra, frb, rc); -+} -+ -+void Assembler::fadd(const DwVfpRegister frt, -+ const DwVfpRegister fra, -+ const DwVfpRegister frb, -+ RCBit rc) { -+ a_form(EXT4 | FADD, frt, fra, frb, rc); -+} -+void Assembler::fmul(const DwVfpRegister frt, -+ const DwVfpRegister fra, -+ const DwVfpRegister frc, -+ RCBit rc) { -+ emit(EXT4 | FMUL | frt.code()*B21 | fra.code()*B16 | frc.code()*B6 | rc); -+} -+void Assembler::fdiv(const DwVfpRegister frt, -+ const DwVfpRegister fra, -+ const DwVfpRegister frb, -+ RCBit rc) { -+ a_form(EXT4 | FDIV, frt, fra, frb, rc); -+} -+ -+void Assembler::fcmpu(const DwVfpRegister fra, -+ const DwVfpRegister frb, -+ CRegister cr) { -+ ASSERT(cr.code() >= 0 && cr.code() <= 7); -+ emit(EXT4 | FCMPU | cr.code()*B23 | fra.code()*B16 | frb.code()*B11); -+} -+ -+void Assembler::fmr(const DwVfpRegister frt, -+ const DwVfpRegister frb, -+ RCBit rc) { -+ emit(EXT4 | FMR | frt.code()*B21 | frb.code()*B11 | rc); -+} -+ -+void Assembler::fctiwz(const DwVfpRegister frt, -+ const DwVfpRegister frb) { -+ emit(EXT4 | FCTIWZ | frt.code()*B21 | frb.code()*B11); -+} -+ -+void Assembler::fctiw(const DwVfpRegister frt, -+ const DwVfpRegister frb) { -+ emit(EXT4 | FCTIW | frt.code()*B21 | frb.code()*B11); -+} -+ -+void Assembler::frim(const DwVfpRegister frt, -+ const DwVfpRegister frb) { -+ emit(EXT4 | FRIM | frt.code()*B21 
| frb.code()*B11); -+} -+ -+void Assembler::frsp(const DwVfpRegister frt, -+ const DwVfpRegister frb, -+ RCBit rc) { -+ emit(EXT4 | FRSP | frt.code()*B21 | frb.code()*B11 | rc); -+} -+ -+void Assembler::fcfid(const DwVfpRegister frt, -+ const DwVfpRegister frb, -+ RCBit rc) { -+ emit(EXT4 | FCFID | frt.code()*B21 | frb.code()*B11 | rc); -+} -+ -+void Assembler::fctid(const DwVfpRegister frt, -+ const DwVfpRegister frb, -+ RCBit rc) { -+ emit(EXT4 | FCTID | frt.code()*B21 | frb.code()*B11 | rc); -+} -+ -+void Assembler::fctidz(const DwVfpRegister frt, -+ const DwVfpRegister frb, -+ RCBit rc) { -+ emit(EXT4 | FCTIDZ | frt.code()*B21 | frb.code()*B11 | rc); -+} -+ -+void Assembler::fsel(const DwVfpRegister frt, const DwVfpRegister fra, -+ const DwVfpRegister frc, const DwVfpRegister frb, -+ RCBit rc) { -+ emit(EXT4 | FSEL | frt.code()*B21 | fra.code()*B16 | frb.code()*B11 | -+ frc.code()*B6 | rc); -+} -+ -+void Assembler::fneg(const DwVfpRegister frt, -+ const DwVfpRegister frb, -+ RCBit rc) { -+ emit(EXT4 | FNEG | frt.code()*B21 | frb.code()*B11 | rc); -+} -+ -+void Assembler::mtfsfi(int bf, int immediate, RCBit rc) { -+ emit(EXT4 | MTFSFI | bf*B23 | immediate*B12 | rc); -+} -+ -+void Assembler::mffs(const DwVfpRegister frt, RCBit rc) { -+ emit(EXT4 | MFFS | frt.code()*B21 | rc); -+} -+ -+void Assembler::mtfsf(const DwVfpRegister frb, bool L, -+ int FLM, bool W, RCBit rc) { -+ emit(EXT4 | MTFSF | frb.code()*B11 | W*B16 | FLM*B17 | L*B25 | rc); -+} -+ -+void Assembler::fsqrt(const DwVfpRegister frt, -+ const DwVfpRegister frb, -+ RCBit rc) { -+ emit(EXT4 | FSQRT | frt.code()*B21 | frb.code()*B11 | rc); -+} -+ -+void Assembler::fabs(const DwVfpRegister frt, -+ const DwVfpRegister frb, -+ RCBit rc) { -+ emit(EXT4 | FABS | frt.code()*B21 | frb.code()*B11 | rc); -+} -+ -+// Pseudo instructions. -+void Assembler::nop(int type) { -+ switch (type) { -+ case 0: -+ ori(r0, r0, Operand::Zero()); -+ break; -+ case DEBUG_BREAK_NOP: -+ ori(r3, r3, Operand::Zero()); -+ break; -+ default: -+ UNIMPLEMENTED(); -+ } -+} -+ -+ -+bool Assembler::IsNop(Instr instr, int type) { -+ ASSERT((0 == type) || (DEBUG_BREAK_NOP == type)); -+ int reg = 0; -+ if (DEBUG_BREAK_NOP == type) { -+ reg = 3; -+ } -+ return instr == (ORI | reg*B21 | reg*B16); -+} -+ -+// Debugging. -+void Assembler::RecordJSReturn() { -+ positions_recorder()->WriteRecordedPositions(); -+ CheckBuffer(); -+ RecordRelocInfo(RelocInfo::JS_RETURN); -+} -+ -+ -+void Assembler::RecordDebugBreakSlot() { -+ positions_recorder()->WriteRecordedPositions(); -+ CheckBuffer(); -+ RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT); -+} -+ -+ -+void Assembler::RecordComment(const char* msg) { -+ if (FLAG_code_comments) { -+ CheckBuffer(); -+ RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast(msg)); -+ } -+} -+ -+ -+void Assembler::GrowBuffer() { -+ if (!own_buffer_) FATAL("external code buffer is too small"); -+ -+ // Compute new buffer size. -+ CodeDesc desc; // the new buffer -+ if (buffer_size_ < 4*KB) { -+ desc.buffer_size = 4*KB; -+ } else if (buffer_size_ < 1*MB) { -+ desc.buffer_size = 2*buffer_size_; -+ } else { -+ desc.buffer_size = buffer_size_ + 1*MB; -+ } -+ CHECK_GT(desc.buffer_size, 0); // no overflow -+ -+ // Set up new buffer. -+ desc.buffer = NewArray(desc.buffer_size); -+ -+ desc.instr_size = pc_offset(); -+ desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos(); -+ -+ // Copy the data. 
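-+ // Two independent moves (layout sketch): instructions grow up from the
-+ // buffer start while reloc info is written backward from the end, so
-+ // code shifts by pc_delta but the reloc block shifts by the difference
-+ // of the end addresses, rc_delta:
-+ //
-+ //   old: [ instrs... | free      | reloc ]
-+ //   new: [ instrs... | more free | reloc ]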
-+ intptr_t pc_delta = desc.buffer - buffer_; -+ intptr_t rc_delta = (desc.buffer + desc.buffer_size) - -+ (buffer_ + buffer_size_); -+ memmove(desc.buffer, buffer_, desc.instr_size); -+ memmove(reloc_info_writer.pos() + rc_delta, -+ reloc_info_writer.pos(), desc.reloc_size); -+ -+ // Switch buffers. -+ DeleteArray(buffer_); -+ buffer_ = desc.buffer; -+ buffer_size_ = desc.buffer_size; -+ pc_ += pc_delta; -+ reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta, -+ reloc_info_writer.last_pc() + pc_delta); -+ -+ // None of our relocation types are pc relative pointing outside the code -+ // buffer nor pc absolute pointing inside the code buffer, so there is no need -+ // to relocate any emitted relocation entries. -+ -+#if ABI_USES_FUNCTION_DESCRIPTORS -+ // Relocate runtime entries. -+ for (RelocIterator it(desc); !it.done(); it.next()) { -+ RelocInfo::Mode rmode = it.rinfo()->rmode(); -+ if (rmode == RelocInfo::INTERNAL_REFERENCE) { -+ intptr_t* p = reinterpret_cast(it.rinfo()->pc()); -+ if (*p != 0) { // 0 means uninitialized. -+ *p += pc_delta; -+ } -+ } -+ } -+#endif -+} -+ -+ -+void Assembler::db(uint8_t data) { -+ CheckBuffer(); -+ *reinterpret_cast(pc_) = data; -+ pc_ += sizeof(uint8_t); -+} -+ -+ -+void Assembler::dd(uint32_t data) { -+ CheckBuffer(); -+ *reinterpret_cast(pc_) = data; -+ pc_ += sizeof(uint32_t); -+} -+ -+ -+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { -+ RelocInfo rinfo(pc_, rmode, data, NULL); -+ if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) { -+ // Adjust code for new modes. -+ ASSERT(RelocInfo::IsDebugBreakSlot(rmode) -+ || RelocInfo::IsJSReturn(rmode) -+ || RelocInfo::IsComment(rmode) -+ || RelocInfo::IsPosition(rmode)); -+ } -+ if (rinfo.rmode() != RelocInfo::NONE) { -+ // Don't record external references unless the heap will be serialized. -+ if (rmode == RelocInfo::EXTERNAL_REFERENCE) { -+#ifdef DEBUG -+ if (!Serializer::enabled()) { -+ Serializer::TooLateToEnableNow(); -+ } -+#endif -+ if (!Serializer::enabled() && !emit_debug_code()) { -+ return; -+ } -+ } -+ ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here -+ if (rmode == RelocInfo::CODE_TARGET_WITH_ID) { -+ RelocInfo reloc_info_with_ast_id(pc_, -+ rmode, -+ RecordedAstId().ToInt(), -+ NULL); -+ ClearRecordedAstId(); -+ reloc_info_writer.Write(&reloc_info_with_ast_id); -+ } else { -+ reloc_info_writer.Write(&rinfo); -+ } -+ } -+} -+ -+ -+void Assembler::BlockTrampolinePoolFor(int instructions) { -+ BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize); -+} -+ -+ -+void Assembler::CheckTrampolinePool() { -+ // Some small sequences of instructions must not be broken up by the -+ // insertion of a trampoline pool; such sequences are protected by setting -+ // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_, -+ // which are both checked here. Also, recursive calls to CheckTrampolinePool -+ // are blocked by trampoline_pool_blocked_nesting_. -+ if ((trampoline_pool_blocked_nesting_ > 0) || -+ (pc_offset() < no_trampoline_pool_before_)) { -+ // Emission is currently blocked; make sure we try again as soon as -+ // possible. -+ if (trampoline_pool_blocked_nesting_ > 0) { -+ next_buffer_check_ = pc_offset() + kInstrSize; -+ } else { -+ next_buffer_check_ = no_trampoline_pool_before_; -+ } -+ return; -+ } -+ -+ ASSERT(!trampoline_emitted_); -+ ASSERT(unbound_labels_count_ >= 0); -+ if (unbound_labels_count_ > 0) { -+ // First we emit jump, then we emit trampoline pool. 
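-+ // Shape of the emitted pool -- one slot per still-unbound label:
-+ //
-+ //   b after_pool        ; straight-line code skips the pool
-+ //   pool_start:
-+ //     b after_pool      ; slot 0, re-targeted later by target_at_put()
-+ //     b after_pool      ; slot 1, ...
-+ //   after_pool:
-+ //
-+ // A 16-bit conditional branch that cannot reach its label is bound to a
-+ // slot instead; the slot's 26-bit unconditional branch spans the rest.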
-+ { BlockTrampolinePoolScope block_trampoline_pool(this); -+ Label after_pool; -+ b(&after_pool); -+ -+ int pool_start = pc_offset(); -+ for (int i = 0; i < unbound_labels_count_; i++) { -+ b(&after_pool); -+ } -+ bind(&after_pool); -+ trampoline_ = Trampoline(pool_start, unbound_labels_count_); -+ -+ trampoline_emitted_ = true; -+ // As we are only going to emit trampoline once, we need to prevent any -+ // further emission. -+ next_buffer_check_ = kMaxInt; -+ } -+ } else { -+ // Number of branches to unbound label at this point is zero, so we can -+ // move next buffer check to maximum. -+ next_buffer_check_ = pc_offset() + -+ kMaxCondBranchReach - kMaxBlockTrampolineSectionSize; -+ } -+ return; -+} -+ -+} } // namespace v8::internal -+ -+#endif // V8_TARGET_ARCH_PPC -diff -up v8-3.14.5.10/src/ppc/assembler-ppc.h.ppc v8-3.14.5.10/src/ppc/assembler-ppc.h ---- v8-3.14.5.10/src/ppc/assembler-ppc.h.ppc 2016-06-07 14:15:45.985393038 -0400 -+++ v8-3.14.5.10/src/ppc/assembler-ppc.h 2016-06-07 14:15:45.985393038 -0400 -@@ -0,0 +1,1382 @@ -+// Copyright (c) 1994-2006 Sun Microsystems Inc. -+// All Rights Reserved. -+// -+// Redistribution and use in source and binary forms, with or without -+// modification, are permitted provided that the following conditions -+// are met: -+// -+// - Redistributions of source code must retain the above copyright notice, -+// this list of conditions and the following disclaimer. -+// -+// - Redistribution in binary form must reproduce the above copyright -+// notice, this list of conditions and the following disclaimer in the -+// documentation and/or other materials provided with the -+// distribution. -+// -+// - Neither the name of Sun Microsystems or the names of contributors may -+// be used to endorse or promote products derived from this software without -+// specific prior written permission. -+// -+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED -+// OF THE POSSIBILITY OF SUCH DAMAGE. -+ -+// The original source code covered by the above license above has been -+// modified significantly by Google Inc. -+// Copyright 2012 the V8 project authors. All rights reserved. -+ -+// -+// Copyright IBM Corp. 2012, 2013. All rights reserved. 
-+// -+ -+// A light-weight PPC Assembler -+// Generates user mode instructions for the PPC architecture up -+ -+#ifndef V8_PPC_ASSEMBLER_PPC_H_ -+#define V8_PPC_ASSEMBLER_PPC_H_ -+#include -+#if !defined(_AIX) -+#include -+#include -+#include -+#endif -+#include "assembler.h" -+#include "constants-ppc.h" -+#include "serialize.h" -+ -+#define ABI_USES_FUNCTION_DESCRIPTORS \ -+ (V8_HOST_ARCH_PPC && \ -+ (defined(_AIX) || \ -+ (defined(V8_TARGET_ARCH_PPC64) && (__BYTE_ORDER != __LITTLE_ENDIAN)))) -+ -+#define ABI_PASSES_HANDLES_IN_REGS \ -+ (!V8_HOST_ARCH_PPC || defined(_AIX) || defined(V8_TARGET_ARCH_PPC64)) -+ -+#define ABI_RETURNS_HANDLES_IN_REGS \ -+ (!V8_HOST_ARCH_PPC || (__BYTE_ORDER == __LITTLE_ENDIAN)) -+ -+#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS \ -+ (!V8_HOST_ARCH_PPC || (__BYTE_ORDER == __LITTLE_ENDIAN)) -+ -+#define ABI_TOC_ADDRESSABILITY_VIA_IP \ -+ (V8_HOST_ARCH_PPC && defined(V8_TARGET_ARCH_PPC64) && \ -+ (__BYTE_ORDER == __LITTLE_ENDIAN)) -+ -+namespace v8 { -+namespace internal { -+ -+// CPU Registers. -+// -+// 1) We would prefer to use an enum, but enum values are assignment- -+// compatible with int, which has caused code-generation bugs. -+// -+// 2) We would prefer to use a class instead of a struct but we don't like -+// the register initialization to depend on the particular initialization -+// order (which appears to be different on OS X, Linux, and Windows for the -+// installed versions of C++ we tried). Using a struct permits C-style -+// "initialization". Also, the Register objects cannot be const as this -+// forces initialization stubs in MSVC, making us dependent on initialization -+// order. -+// -+// 3) By not using an enum, we are possibly preventing the compiler from -+// doing certain constant folds, which may significantly reduce the -+// code generated for some assembly instructions (because they boil down -+// to a few constants). If this is a problem, we could change the code -+// such that we use an enum in optimized mode, and the struct in debug -+// mode. This way we get the compile-time error checking in debug mode -+// and best performance in optimized code. 
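-+// The C-style "initialization" mentioned above is plain aggregate
-+// initialization, as used for the register constants further down:
-+//
-+//   const Register r3 = { kRegister_r3_Code };  // POD, no ctor runs
-+//
-+// which keeps Register trivially constructible and safe to use in static
-+// initializers regardless of translation-unit order.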
-+ -+// Core register -+struct Register { -+ static const int kNumRegisters = 32; -+ static const int kNumAllocatableRegisters = 8; // r3-r10 -+ static const int kSizeInBytes = 4; -+ -+ static int ToAllocationIndex(Register reg) { -+ int index = reg.code() - 3; // r0-r2 are skipped -+ ASSERT(index < kNumAllocatableRegisters); -+ return index; -+ } -+ -+ static Register FromAllocationIndex(int index) { -+ ASSERT(index >= 0 && index < kNumAllocatableRegisters); -+ return from_code(index + 3); // r0-r2 are skipped -+ } -+ -+ static const char* AllocationIndexToString(int index) { -+ ASSERT(index >= 0 && index < kNumAllocatableRegisters); -+ const char* const names[] = { -+ "r3", -+ "r4", -+ "r5", -+ "r6", -+ "r7", -+ "r8", -+ "r9", -+ "r10", // currently last allocated register -+ "r11", // lithium scratch -+ "r12", // ip -+ "r13", -+ "r14", -+ "r15", -+ "r16", -+ "r17", -+ "r18", -+ "r19", -+ "r20", -+ "r21", -+ "r22", -+ "r23", -+ "r24", -+ "r25", -+ "r26", -+ "r27", -+ "r28", -+ "r29", -+ "r30", -+ }; -+ return names[index]; -+ } -+ -+ static Register from_code(int code) { -+ Register r = { code }; -+ return r; -+ } -+ -+ bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; } -+ bool is(Register reg) const { return code_ == reg.code_; } -+ int code() const { -+ ASSERT(is_valid()); -+ return code_; -+ } -+ int bit() const { -+ ASSERT(is_valid()); -+ return 1 << code_; -+ } -+ -+ void set_code(int code) { -+ code_ = code; -+ ASSERT(is_valid()); -+ } -+ -+ // Unfortunately we can't make this private in a struct. -+ int code_; -+}; -+ -+// These constants are used in several locations, including static initializers -+const int kRegister_no_reg_Code = -1; -+const int kRegister_r0_Code = 0; -+const int kRegister_sp_Code = 1; // todo - rename to SP -+const int kRegister_r2_Code = 2; // special on PowerPC -+const int kRegister_r3_Code = 3; -+const int kRegister_r4_Code = 4; -+const int kRegister_r5_Code = 5; -+const int kRegister_r6_Code = 6; -+const int kRegister_r7_Code = 7; -+const int kRegister_r8_Code = 8; -+const int kRegister_r9_Code = 9; -+const int kRegister_r10_Code = 10; -+const int kRegister_r11_Code = 11; -+const int kRegister_ip_Code = 12; // todo - fix -+const int kRegister_r13_Code = 13; -+const int kRegister_r14_Code = 14; -+const int kRegister_r15_Code = 15; -+ -+const int kRegister_r16_Code = 16; -+const int kRegister_r17_Code = 17; -+const int kRegister_r18_Code = 18; -+const int kRegister_r19_Code = 19; -+const int kRegister_r20_Code = 20; -+const int kRegister_r21_Code = 21; -+const int kRegister_r22_Code = 22; -+const int kRegister_r23_Code = 23; -+const int kRegister_r24_Code = 24; -+const int kRegister_r25_Code = 25; -+const int kRegister_r26_Code = 26; -+const int kRegister_r27_Code = 27; -+const int kRegister_r28_Code = 28; -+const int kRegister_r29_Code = 29; -+const int kRegister_r30_Code = 30; -+const int kRegister_fp_Code = 31; -+ -+const Register no_reg = { kRegister_no_reg_Code }; -+ -+const Register r0 = { kRegister_r0_Code }; -+const Register sp = { kRegister_sp_Code }; -+const Register r2 = { kRegister_r2_Code }; -+const Register r3 = { kRegister_r3_Code }; -+const Register r4 = { kRegister_r4_Code }; -+const Register r5 = { kRegister_r5_Code }; -+const Register r6 = { kRegister_r6_Code }; -+const Register r7 = { kRegister_r7_Code }; -+const Register r8 = { kRegister_r8_Code }; -+const Register r9 = { kRegister_r9_Code }; -+const Register r10 = { kRegister_r10_Code }; -+// Used as lithium codegen scratch register. 
-+const Register r11 = { kRegister_r11_Code }; -+const Register ip = { kRegister_ip_Code }; -+// Used as roots register. -+const Register r13 = { kRegister_r13_Code }; -+const Register r14 = { kRegister_r14_Code }; -+const Register r15 = { kRegister_r15_Code }; -+ -+const Register r16 = { kRegister_r16_Code }; -+const Register r17 = { kRegister_r17_Code }; -+const Register r18 = { kRegister_r18_Code }; -+const Register r19 = { kRegister_r19_Code }; -+// Used as context register. -+const Register r20 = { kRegister_r20_Code }; -+const Register r21 = { kRegister_r21_Code }; -+const Register r22 = { kRegister_r22_Code }; -+const Register r23 = { kRegister_r23_Code }; -+const Register r24 = { kRegister_r24_Code }; -+const Register r25 = { kRegister_r25_Code }; -+const Register r26 = { kRegister_r26_Code }; -+const Register r27 = { kRegister_r27_Code }; -+const Register r28 = { kRegister_r28_Code }; -+const Register r29 = { kRegister_r29_Code }; -+const Register r30 = { kRegister_r30_Code }; -+const Register fp = { kRegister_fp_Code }; -+ -+// Double word FP register. -+struct DwVfpRegister { -+ static const int kNumRegisters = 32; -+ static const int kNumVolatileRegisters = 14; // d0-d13 -+ static const int kNumAllocatableRegisters = 12; // d1-d12 -+ -+ inline static int ToAllocationIndex(DwVfpRegister reg); -+ -+ static DwVfpRegister FromAllocationIndex(int index) { -+ ASSERT(index >= 0 && index < kNumAllocatableRegisters); -+ return from_code(index + 1); // d0 is skipped -+ } -+ -+ static const char* AllocationIndexToString(int index) { -+ ASSERT(index >= 0 && index < kNumAllocatableRegisters); -+ const char* const names[] = { -+ "d1", -+ "d2", -+ "d3", -+ "d4", -+ "d5", -+ "d6", -+ "d7", -+ "d8", -+ "d9", -+ "d10", -+ "d11", -+ "d12", -+ }; -+ return names[index]; -+ } -+ -+ static DwVfpRegister from_code(int code) { -+ DwVfpRegister r = { code }; -+ return r; -+ } -+ -+ // Supporting d0 to d15, can be later extended to d31. 
-+ bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; } -+ bool is(DwVfpRegister reg) const { return code_ == reg.code_; } -+ -+ int code() const { -+ ASSERT(is_valid()); -+ return code_; -+ } -+ int bit() const { -+ ASSERT(is_valid()); -+ return 1 << code_; -+ } -+ void split_code(int* vm, int* m) const { -+ ASSERT(is_valid()); -+ *m = (code_ & 0x10) >> 4; -+ *vm = code_ & 0x0F; -+ } -+ -+ int code_; -+}; -+ -+ -+typedef DwVfpRegister DoubleRegister; -+ -+const DwVfpRegister no_dreg = { -1 }; -+const DwVfpRegister d0 = { 0 }; -+const DwVfpRegister d1 = { 1 }; -+const DwVfpRegister d2 = { 2 }; -+const DwVfpRegister d3 = { 3 }; -+const DwVfpRegister d4 = { 4 }; -+const DwVfpRegister d5 = { 5 }; -+const DwVfpRegister d6 = { 6 }; -+const DwVfpRegister d7 = { 7 }; -+const DwVfpRegister d8 = { 8 }; -+const DwVfpRegister d9 = { 9 }; -+const DwVfpRegister d10 = { 10 }; -+const DwVfpRegister d11 = { 11 }; -+const DwVfpRegister d12 = { 12 }; -+const DwVfpRegister d13 = { 13 }; -+const DwVfpRegister d14 = { 14 }; -+const DwVfpRegister d15 = { 15 }; -+const DwVfpRegister d16 = { 16 }; -+const DwVfpRegister d17 = { 17 }; -+const DwVfpRegister d18 = { 18 }; -+const DwVfpRegister d19 = { 19 }; -+const DwVfpRegister d20 = { 20 }; -+const DwVfpRegister d21 = { 21 }; -+const DwVfpRegister d22 = { 22 }; -+const DwVfpRegister d23 = { 23 }; -+const DwVfpRegister d24 = { 24 }; -+const DwVfpRegister d25 = { 25 }; -+const DwVfpRegister d26 = { 26 }; -+const DwVfpRegister d27 = { 27 }; -+const DwVfpRegister d28 = { 28 }; -+const DwVfpRegister d29 = { 29 }; -+const DwVfpRegister d30 = { 30 }; -+const DwVfpRegister d31 = { 31 }; -+ -+// Aliases for double registers. Defined using #define instead of -+// "static const DwVfpRegister&" because Clang complains otherwise when a -+// compilation unit that includes this header doesn't use the variables. -+#define kFirstCalleeSavedDoubleReg d14 -+#define kLastCalleeSavedDoubleReg d31 -+#define kDoubleRegZero d14 -+#define kScratchDoubleReg d13 -+ -+Register ToRegister(int num); -+ -+// Coprocessor register -+struct CRegister { -+ bool is_valid() const { return 0 <= code_ && code_ < 16; } -+ bool is(CRegister creg) const { return code_ == creg.code_; } -+ int code() const { -+ ASSERT(is_valid()); -+ return code_; -+ } -+ int bit() const { -+ ASSERT(is_valid()); -+ return 1 << code_; -+ } -+ -+ // Unfortunately we can't make this private in a struct. 
-+ int code_; -+}; -+ -+ -+const CRegister no_creg = { -1 }; -+ -+const CRegister cr0 = { 0 }; -+const CRegister cr1 = { 1 }; -+const CRegister cr2 = { 2 }; -+const CRegister cr3 = { 3 }; -+const CRegister cr4 = { 4 }; -+const CRegister cr5 = { 5 }; -+const CRegister cr6 = { 6 }; -+const CRegister cr7 = { 7 }; -+const CRegister cr8 = { 8 }; -+const CRegister cr9 = { 9 }; -+const CRegister cr10 = { 10 }; -+const CRegister cr11 = { 11 }; -+const CRegister cr12 = { 12 }; -+const CRegister cr13 = { 13 }; -+const CRegister cr14 = { 14 }; -+const CRegister cr15 = { 15 }; -+ -+// ----------------------------------------------------------------------------- -+// Machine instruction Operands -+ -+// Class Operand represents a shifter operand in data processing instructions -+class Operand BASE_EMBEDDED { -+ public: -+ // immediate -+ INLINE(explicit Operand(intptr_t immediate, -+ RelocInfo::Mode rmode = RelocInfo::NONE)); -+ INLINE(static Operand Zero()) { -+ return Operand(static_cast(0)); -+ } -+ INLINE(explicit Operand(const ExternalReference& f)); -+ explicit Operand(Handle handle); -+ INLINE(explicit Operand(Smi* value)); -+ -+ // rm -+ INLINE(explicit Operand(Register rm)); -+ -+ // Return true if this is a register operand. -+ INLINE(bool is_reg() const); -+ -+ inline intptr_t immediate() const { -+ ASSERT(!rm_.is_valid()); -+ return imm_; -+ } -+ -+ Register rm() const { return rm_; } -+ -+ private: -+ Register rm_; -+ intptr_t imm_; // valid if rm_ == no_reg -+ RelocInfo::Mode rmode_; -+ -+ friend class Assembler; -+ friend class MacroAssembler; -+}; -+ -+ -+// Class MemOperand represents a memory operand in load and store instructions -+// On PowerPC we have base register + 16bit signed value -+// Alternatively we can have a 16bit signed value immediate -+class MemOperand BASE_EMBEDDED { -+ public: -+ explicit MemOperand(Register rn, int32_t offset = 0); -+ -+ explicit MemOperand(Register ra, Register rb); -+ -+ int32_t offset() const { -+ ASSERT(rb_.is(no_reg)); -+ return offset_; -+ } -+ -+ // PowerPC - base register -+ Register ra() const { -+ ASSERT(!ra_.is(no_reg)); -+ return ra_; -+ } -+ -+ Register rb() const { -+ ASSERT(offset_ == 0 && !rb_.is(no_reg)); -+ return rb_; -+ } -+ -+ private: -+ Register ra_; // base -+ int32_t offset_; // offset -+ Register rb_; // index -+ -+ friend class Assembler; -+}; -+ -+// CpuFeatures keeps track of which features are supported by the target CPU. -+// Supported features must be enabled by a Scope before use. -+class CpuFeatures : public AllStatic { -+ public: -+ // Detect features of the target CPU. Set safe defaults if the serializer -+ // is enabled (snapshots must be portable). -+ static void Probe(); -+ -+ // Check whether a feature is supported by the target CPU. -+ static bool IsSupported(CpuFeature f) { -+ ASSERT(initialized_); -+ return (supported_ & (1u << f)) != 0; -+ } -+ -+#ifdef DEBUG -+ // Check whether a feature is currently enabled. -+ static bool IsEnabled(CpuFeature f) { -+ ASSERT(initialized_); -+ Isolate* isolate = Isolate::UncheckedCurrent(); -+ if (isolate == NULL) { -+ // When no isolate is available, work as if we're running in -+ // release mode. -+ return IsSupported(f); -+ } -+ unsigned enabled = static_cast(isolate->enabled_cpu_features()); -+ return (enabled & (1u << f)) != 0; -+ } -+#endif -+ -+ // Enable a specified feature within a scope. 
-+ class Scope BASE_EMBEDDED { -+#ifdef DEBUG -+ -+ public: -+ explicit Scope(CpuFeature f) { -+ unsigned mask = 1u << f; -+ ASSERT(CpuFeatures::IsSupported(f)); -+ ASSERT(!Serializer::enabled() || -+ (CpuFeatures::found_by_runtime_probing_ & mask) == 0); -+ isolate_ = Isolate::UncheckedCurrent(); -+ old_enabled_ = 0; -+ if (isolate_ != NULL) { -+ old_enabled_ = static_cast(isolate_->enabled_cpu_features()); -+ isolate_->set_enabled_cpu_features(old_enabled_ | mask); -+ } -+ } -+ ~Scope() { -+ ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_); -+ if (isolate_ != NULL) { -+ isolate_->set_enabled_cpu_features(old_enabled_); -+ } -+ } -+ -+ private: -+ Isolate* isolate_; -+ unsigned old_enabled_; -+#else -+ -+ public: -+ explicit Scope(CpuFeature f) {} -+#endif -+ }; -+ -+ class TryForceFeatureScope BASE_EMBEDDED { -+ public: -+ explicit TryForceFeatureScope(CpuFeature f) -+ : old_supported_(CpuFeatures::supported_) { -+ if (CanForce()) { -+ CpuFeatures::supported_ |= (1u << f); -+ } -+ } -+ -+ ~TryForceFeatureScope() { -+ if (CanForce()) { -+ CpuFeatures::supported_ = old_supported_; -+ } -+ } -+ -+ private: -+ static bool CanForce() { -+ // It's only safe to temporarily force support of CPU features -+ // when there's only a single isolate, which is guaranteed when -+ // the serializer is enabled. -+ return Serializer::enabled(); -+ } -+ -+ const unsigned old_supported_; -+ }; -+ -+ private: -+#ifdef DEBUG -+ static bool initialized_; -+#endif -+ static unsigned supported_; -+ static unsigned found_by_runtime_probing_; -+ -+ DISALLOW_COPY_AND_ASSIGN(CpuFeatures); -+}; -+ -+ -+class Assembler : public AssemblerBase { -+ public: -+ // Create an assembler. Instructions and relocation information are emitted -+ // into a buffer, with the instructions starting from the beginning and the -+ // relocation information starting from the end of the buffer. See CodeDesc -+ // for a detailed comment on the layout (globals.h). -+ // -+ // If the provided buffer is NULL, the assembler allocates and grows its own -+ // buffer, and buffer_size determines the initial buffer size. The buffer is -+ // owned by the assembler and deallocated upon destruction of the assembler. -+ // -+ // If the provided buffer is not NULL, the assembler uses the provided buffer -+ // for code generation and assumes its size to be buffer_size. If the buffer -+ // is too small, a fatal error occurs. No deallocation of the buffer is done -+ // upon destruction of the assembler. -+ Assembler(Isolate* isolate, void* buffer, int buffer_size); -+ ~Assembler(); -+ -+ // Overrides the default provided by FLAG_debug_code. -+ void set_emit_debug_code(bool value) { emit_debug_code_ = value; } -+ -+ // Avoids using instructions that vary in size in unpredictable ways between -+ // the snapshot and the running VM. This is needed by the full compiler so -+ // that it can recompile code with debug support and fix the PC. -+ void set_predictable_code_size(bool value) { predictable_code_size_ = value; } -+ -+ // GetCode emits any pending (non-emitted) code and fills the descriptor -+ // desc. GetCode() is idempotent; it returns the same result if no other -+ // Assembler functions are invoked in between GetCode() calls. -+ void GetCode(CodeDesc* desc); -+ -+ // Label operations & relative jumps (PPUM Appendix D) -+ // -+ // Takes a branch opcode (cc) and a label (L) and generates -+ // either a backward branch or a forward branch and links it -+ // to the label fixup chain. 
Usage: -+ // -+ // Label L; // unbound label -+ // j(cc, &L); // forward branch to unbound label -+ // bind(&L); // bind label to the current pc -+ // j(cc, &L); // backward branch to bound label -+ // bind(&L); // illegal: a label may be bound only once -+ // -+ // Note: The same Label can be used for forward and backward branches -+ // but it may be bound only once. -+ -+ void bind(Label* L); // binds an unbound label L to the current code position -+ // Determines if Label is bound and near enough so that a single -+ // branch instruction can be used to reach it. -+ bool is_near(Label* L, Condition cond); -+ -+ // Returns the branch offset to the given label from the current code position -+ // Links the label to the current position if it is still unbound -+ // Manages the jump elimination optimization if the second parameter is true. -+ int branch_offset(Label* L, bool jump_elimination_allowed); -+ -+ // Puts a labels target address at the given position. -+ // The high 8 bits are set to zero. -+ void label_at_put(Label* L, int at_offset); -+ -+ // Read/Modify the code target address in the branch/call instruction at pc. -+ INLINE(static Address target_address_at(Address pc)); -+ INLINE(static void set_target_address_at(Address pc, Address target)); -+ -+ // Return the code target address at a call site from the return address -+ // of that call in the instruction stream. -+ inline static Address target_address_from_return_address(Address pc); -+ -+ // This sets the branch destination. -+ // This is for calls and branches within generated code. -+ inline static void deserialization_set_special_target_at( -+ Address instruction_payload, Address target); -+ -+ // Size of an instruction. -+ static const int kInstrSize = sizeof(Instr); -+ -+ // Here we are patching the address in the LUI/ORI instruction pair. -+ // These values are used in the serialization process and must be zero for -+ // PPC platform, as Code, Embedded Object or External-reference pointers -+ // are split across two consecutive instructions and don't exist separately -+ // in the code, so the serializer should not step forwards in memory after -+ // a target is resolved and written. -+ static const int kSpecialTargetSize = 0; -+ -+ // Number of consecutive instructions used to store pointer sized constant. -+#if V8_TARGET_ARCH_PPC64 -+ static const int kInstructionsForPtrConstant = 5; -+#else -+ static const int kInstructionsForPtrConstant = 2; -+#endif -+ -+ // Distance between the instruction referring to the address of the call -+ // target and the return address. -+ -+ // Call sequence is a FIXED_SEQUENCE: -+ // lis r8, 2148 @ call address hi -+ // ori r8, r8, 5728 @ call address lo -+ // mtlr r8 -+ // blrl -+ // @ return address -+ // in 64bit mode, the addres load is a 5 instruction sequence -+#if V8_TARGET_ARCH_PPC64 -+ static const int kCallTargetAddressOffset = 7 * kInstrSize; -+#else -+ static const int kCallTargetAddressOffset = 4 * kInstrSize; -+#endif -+ -+ // Distance between start of patched return sequence and the emitted address -+ // to jump to. -+ // Patched return sequence is a FIXED_SEQUENCE: -+ // lis r0,
-+ // ori r0, r0, <address lo>
-+ // mtlr r0
-+ // blrl
-+ static const int kPatchReturnSequenceAddressOffset = 0 * kInstrSize;
-+
-+ // Distance between start of patched debug break slot and the emitted address
-+ // to jump to.
-+ // Patched debug break slot code is a FIXED_SEQUENCE:
-+ // lis r0, <address hi>
-+ // ori r0, r0, <address lo>
-+ // mtlr r0 -+ // blrl -+ static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize; -+ -+ // Difference between address of current opcode and value read from pc -+ // register. -+ static const int kPcLoadDelta = 0; // Todo: remove -+ -+#if V8_TARGET_ARCH_PPC64 -+ static const int kPatchDebugBreakSlotReturnOffset = 7 * kInstrSize; -+#else -+ static const int kPatchDebugBreakSlotReturnOffset = 4 * kInstrSize; -+#endif -+ -+ // This is the length of the BreakLocationIterator::SetDebugBreakAtReturn() -+ // code patch FIXED_SEQUENCE -+#if V8_TARGET_ARCH_PPC64 -+ static const int kJSReturnSequenceInstructions = 8; -+#else -+ static const int kJSReturnSequenceInstructions = 5; -+#endif -+ -+ // This is the length of the code sequence from SetDebugBreakAtSlot() -+ // FIXED_SEQUENCE -+#if V8_TARGET_ARCH_PPC64 -+ static const int kDebugBreakSlotInstructions = 7; -+#else -+ static const int kDebugBreakSlotInstructions = 4; -+#endif -+ static const int kDebugBreakSlotLength = -+ kDebugBreakSlotInstructions * kInstrSize; -+ -+ static inline int encode_crbit(const CRegister& cr, enum CRBit crbit) { -+ return ((cr.code() * CRWIDTH) + crbit); -+ } -+ -+ // --------------------------------------------------------------------------- -+ // Code generation -+ -+ // Insert the smallest number of nop instructions -+ // possible to align the pc offset to a multiple -+ // of m. m must be a power of 2 (>= 4). -+ void Align(int m); -+ // Aligns code to something that's optimal for a jump target for the platform. -+ void CodeTargetAlign(); -+ -+ // Branch instructions -+ void bclr(BOfield bo, LKBit lk); -+ void blr(); -+ void bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk = LeaveLK); -+ void b(int branch_offset, LKBit lk); -+ -+ void bcctr(BOfield bo, LKBit lk); -+ void bcr(); -+ -+ // Convenience branch instructions using labels -+ void b(Label* L, LKBit lk = LeaveLK) { -+ b(branch_offset(L, false), lk); -+ } -+ -+ void bc_short(Condition cond, Label* L, CRegister cr = cr7, -+ LKBit lk = LeaveLK) { -+ ASSERT(cond != al); -+ ASSERT(cr.code() >= 0 && cr.code() <= 7); -+ -+ int b_offset = branch_offset(L, false); -+ -+ switch (cond) { -+ case eq: -+ bc(b_offset, BT, encode_crbit(cr, CR_EQ), lk); -+ break; -+ case ne: -+ bc(b_offset, BF, encode_crbit(cr, CR_EQ), lk); -+ break; -+ case gt: -+ bc(b_offset, BT, encode_crbit(cr, CR_GT), lk); -+ break; -+ case le: -+ bc(b_offset, BF, encode_crbit(cr, CR_GT), lk); -+ break; -+ case lt: -+ bc(b_offset, BT, encode_crbit(cr, CR_LT), lk); -+ break; -+ case ge: -+ bc(b_offset, BF, encode_crbit(cr, CR_LT), lk); -+ break; -+ case unordered: -+ bc(b_offset, BT, encode_crbit(cr, CR_FU), lk); -+ break; -+ case ordered: -+ bc(b_offset, BF, encode_crbit(cr, CR_FU), lk); -+ break; -+ case overflow: -+ bc(b_offset, BT, encode_crbit(cr, CR_SO), lk); -+ break; -+ case nooverflow: -+ bc(b_offset, BF, encode_crbit(cr, CR_SO), lk); -+ break; -+ default: -+ UNIMPLEMENTED(); -+ } -+ } -+ -+ void b(Condition cond, Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) { -+ if (cond == al) { -+ b(L, lk); -+ return; -+ } -+ -+ if ((L->is_bound() && is_near(L, cond)) || -+ !is_trampoline_emitted()) { -+ bc_short(cond, L, cr, lk); -+ return; -+ } -+ -+ Label skip; -+ Condition neg_cond = NegateCondition(cond); -+ bc_short(neg_cond, &skip, cr); -+ b(L, lk); -+ bind(&skip); -+ } -+ -+ void bne(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) { -+ b(ne, L, cr, lk); } -+ void beq(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) { -+ b(eq, L, cr, lk); } -+ void blt(Label* L, 
CRegister cr = cr7, LKBit lk = LeaveLK) { -+ b(lt, L, cr, lk); } -+ void bge(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) { -+ b(ge, L, cr, lk); } -+ void ble(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) { -+ b(le, L, cr, lk); } -+ void bgt(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) { -+ b(gt, L, cr, lk); } -+ void bunordered(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) { -+ b(unordered, L, cr, lk); } -+ void bordered(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) { -+ b(ordered, L, cr, lk); } -+ void boverflow(Label* L, CRegister cr = cr0, LKBit lk = LeaveLK) { -+ b(overflow, L, cr, lk); } -+ void bnooverflow(Label* L, CRegister cr = cr0, LKBit lk = LeaveLK) { -+ b(nooverflow, L, cr, lk); } -+ -+ // Decrement CTR; branch if CTR != 0 -+ void bdnz(Label* L, LKBit lk = LeaveLK) { -+ bc(branch_offset(L, false), DCBNZ, 0, lk); -+ } -+ -+ // Data-processing instructions -+ -+ // PowerPC -+ void sub(Register dst, Register src1, Register src2, -+ OEBit s = LeaveOE, RCBit r = LeaveRC); -+ -+ void subfic(Register dst, Register src, const Operand& imm); -+ -+ void subfc(Register dst, Register src1, Register src2, -+ OEBit s = LeaveOE, RCBit r = LeaveRC); -+ -+ void add(Register dst, Register src1, Register src2, -+ OEBit s = LeaveOE, RCBit r = LeaveRC); -+ -+ void addc(Register dst, Register src1, Register src2, -+ OEBit o = LeaveOE, RCBit r = LeaveRC); -+ -+ void addze(Register dst, Register src1, OEBit o, RCBit r); -+ -+ void mullw(Register dst, Register src1, Register src2, -+ OEBit o = LeaveOE, RCBit r = LeaveRC); -+ -+ void mulhw(Register dst, Register src1, Register src2, -+ OEBit o = LeaveOE, RCBit r = LeaveRC); -+ -+ void divw(Register dst, Register src1, Register src2, -+ OEBit o = LeaveOE, RCBit r = LeaveRC); -+ -+ void addi(Register dst, Register src, const Operand& imm); -+ void addis(Register dst, Register src, const Operand& imm); -+ void addic(Register dst, Register src, const Operand& imm); -+ -+ void and_(Register dst, Register src1, Register src2, RCBit rc = LeaveRC); -+ void andc(Register dst, Register src1, Register src2, RCBit rc = LeaveRC); -+ void andi(Register ra, Register rs, const Operand& imm); -+ void andis(Register ra, Register rs, const Operand& imm); -+ void nor(Register dst, Register src1, Register src2, RCBit r = LeaveRC); -+ void notx(Register dst, Register src, RCBit r = LeaveRC); -+ void ori(Register dst, Register src, const Operand& imm); -+ void oris(Register dst, Register src, const Operand& imm); -+ void orx(Register dst, Register src1, Register src2, RCBit rc = LeaveRC); -+ void xori(Register dst, Register src, const Operand& imm); -+ void xoris(Register ra, Register rs, const Operand& imm); -+ void xor_(Register dst, Register src1, Register src2, RCBit rc = LeaveRC); -+ void cmpi(Register src1, const Operand& src2, CRegister cr = cr7); -+ void cmpli(Register src1, const Operand& src2, CRegister cr = cr7); -+ void li(Register dst, const Operand& src); -+ void lis(Register dst, const Operand& imm); -+ void mr(Register dst, Register src); -+ -+ void lbz(Register dst, const MemOperand& src); -+ void lbzx(Register dst, const MemOperand& src); -+ void lbzux(Register dst, const MemOperand& src); -+ void lhz(Register dst, const MemOperand& src); -+ void lhzx(Register dst, const MemOperand& src); -+ void lhzux(Register dst, const MemOperand& src); -+ void lwz(Register dst, const MemOperand& src); -+ void lwzu(Register dst, const MemOperand& src); -+ void lwzx(Register dst, const MemOperand& src); -+ void lwzux(Register dst, const MemOperand& 
src); -+ void lwa(Register dst, const MemOperand& src); -+ void stb(Register dst, const MemOperand& src); -+ void stbx(Register dst, const MemOperand& src); -+ void stbux(Register dst, const MemOperand& src); -+ void sth(Register dst, const MemOperand& src); -+ void sthx(Register dst, const MemOperand& src); -+ void sthux(Register dst, const MemOperand& src); -+ void stw(Register dst, const MemOperand& src); -+ void stwu(Register dst, const MemOperand& src); -+ void stwx(Register rs, const MemOperand& src); -+ void stwux(Register rs, const MemOperand& src); -+ -+ void extsb(Register rs, Register ra, RCBit r = LeaveRC); -+ void extsh(Register rs, Register ra, RCBit r = LeaveRC); -+ -+ void neg(Register rt, Register ra, OEBit o = LeaveOE, RCBit c = LeaveRC); -+ -+#if V8_TARGET_ARCH_PPC64 -+ void ld(Register rd, const MemOperand &src); -+ void ldx(Register rd, const MemOperand &src); -+ void ldu(Register rd, const MemOperand &src); -+ void ldux(Register rd, const MemOperand &src); -+ void std(Register rs, const MemOperand &src); -+ void stdx(Register rs, const MemOperand &src); -+ void stdu(Register rs, const MemOperand &src); -+ void stdux(Register rs, const MemOperand &src); -+ void rldic(Register dst, Register src, int sh, int mb, RCBit r = LeaveRC); -+ void rldicl(Register dst, Register src, int sh, int mb, RCBit r = LeaveRC); -+ void rldicr(Register dst, Register src, int sh, int me, RCBit r = LeaveRC); -+ void rldimi(Register dst, Register src, int sh, int mb, RCBit r = LeaveRC); -+ void sldi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC); -+ void srdi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC); -+ void clrrdi(Register dst, Register src, const Operand& val, -+ RCBit rc = LeaveRC); -+ void clrldi(Register dst, Register src, const Operand& val, -+ RCBit rc = LeaveRC); -+ void sradi(Register ra, Register rs, int sh, RCBit r = LeaveRC); -+ void srd(Register dst, Register src1, Register src2, RCBit r = LeaveRC); -+ void sld(Register dst, Register src1, Register src2, RCBit r = LeaveRC); -+ void srad(Register dst, Register src1, Register src2, RCBit r = LeaveRC); -+ void cntlzd_(Register dst, Register src, RCBit rc = LeaveRC); -+ void extsw(Register rs, Register ra, RCBit r = LeaveRC); -+ void mulld(Register dst, Register src1, Register src2, -+ OEBit o = LeaveOE, RCBit r = LeaveRC); -+ void divd(Register dst, Register src1, Register src2, -+ OEBit o = LeaveOE, RCBit r = LeaveRC); -+#endif -+ -+ void rlwinm(Register ra, Register rs, int sh, int mb, int me, -+ RCBit rc = LeaveRC); -+ void rlwimi(Register ra, Register rs, int sh, int mb, int me, -+ RCBit rc = LeaveRC); -+ void slwi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC); -+ void srwi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC); -+ void clrrwi(Register dst, Register src, const Operand& val, -+ RCBit rc = LeaveRC); -+ void clrlwi(Register dst, Register src, const Operand& val, -+ RCBit rc = LeaveRC); -+ void srawi(Register ra, Register rs, int sh, RCBit r = LeaveRC); -+ void srw(Register dst, Register src1, Register src2, RCBit r = LeaveRC); -+ void slw(Register dst, Register src1, Register src2, RCBit r = LeaveRC); -+ void sraw(Register dst, Register src1, Register src2, RCBit r = LeaveRC); -+ -+ void cntlzw_(Register dst, Register src, RCBit rc = LeaveRC); -+ // end PowerPC -+ -+ void subi(Register dst, Register src1, const Operand& src2); -+ -+ void cmp(Register src1, Register src2, CRegister cr = cr7); -+ void cmpl(Register src1, Register src2, 
CRegister cr = cr7); -+ -+ void mov(Register dst, const Operand& src); -+ -+ // Multiply instructions -+ -+ // PowerPC -+ void mul(Register dst, Register src1, Register src2, -+ OEBit s = LeaveOE, RCBit r = LeaveRC); -+ -+ // Miscellaneous arithmetic instructions -+ -+ // Special register access -+ // PowerPC -+ void crxor(int bt, int ba, int bb); -+ void mflr(Register dst); -+ void mtlr(Register src); -+ void mtctr(Register src); -+ void mtxer(Register src); -+ void mcrfs(int bf, int bfa); -+ void mfcr(Register dst); -+ -+ void fake_asm(enum FAKE_OPCODE_T fopcode); -+ void marker_asm(int mcode); -+ void function_descriptor(); -+ // end PowerPC -+ -+ // Exception-generating instructions and debugging support -+ void stop(const char* msg, -+ Condition cond = al, -+ int32_t code = kDefaultStopCode, -+ CRegister cr = cr7); -+ -+ void bkpt(uint32_t imm16); // v5 and above -+ -+ // Informational messages when simulating -+ void info(const char* msg, -+ Condition cond = al, -+ int32_t code = kDefaultStopCode, -+ CRegister cr = cr7); -+ -+ void dcbf(Register ra, Register rb); -+ void sync(); -+ void icbi(Register ra, Register rb); -+ void isync(); -+ -+ // Support for floating point -+ void lfd(const DwVfpRegister frt, const MemOperand& src); -+ void lfdu(const DwVfpRegister frt, const MemOperand& src); -+ void lfdx(const DwVfpRegister frt, const MemOperand& src); -+ void lfdux(const DwVfpRegister frt, const MemOperand& src); -+ void lfs(const DwVfpRegister frt, const MemOperand& src); -+ void lfsu(const DwVfpRegister frt, const MemOperand& src); -+ void lfsx(const DwVfpRegister frt, const MemOperand& src); -+ void lfsux(const DwVfpRegister frt, const MemOperand& src); -+ void stfd(const DwVfpRegister frs, const MemOperand& src); -+ void stfdu(const DwVfpRegister frs, const MemOperand& src); -+ void stfdx(const DwVfpRegister frs, const MemOperand& src); -+ void stfdux(const DwVfpRegister frs, const MemOperand& src); -+ void stfs(const DwVfpRegister frs, const MemOperand& src); -+ void stfsu(const DwVfpRegister frs, const MemOperand& src); -+ void stfsx(const DwVfpRegister frs, const MemOperand& src); -+ void stfsux(const DwVfpRegister frs, const MemOperand& src); -+ -+ void fadd(const DwVfpRegister frt, const DwVfpRegister fra, -+ const DwVfpRegister frb, RCBit rc = LeaveRC); -+ void fsub(const DwVfpRegister frt, const DwVfpRegister fra, -+ const DwVfpRegister frb, RCBit rc = LeaveRC); -+ void fdiv(const DwVfpRegister frt, const DwVfpRegister fra, -+ const DwVfpRegister frb, RCBit rc = LeaveRC); -+ void fmul(const DwVfpRegister frt, const DwVfpRegister fra, -+ const DwVfpRegister frc, RCBit rc = LeaveRC); -+ void fcmpu(const DwVfpRegister fra, const DwVfpRegister frb, -+ CRegister cr = cr7); -+ void fmr(const DwVfpRegister frt, const DwVfpRegister frb, -+ RCBit rc = LeaveRC); -+ void fctiwz(const DwVfpRegister frt, const DwVfpRegister frb); -+ void fctiw(const DwVfpRegister frt, const DwVfpRegister frb); -+ void frim(const DwVfpRegister frt, const DwVfpRegister frb); -+ void frsp(const DwVfpRegister frt, const DwVfpRegister frb, -+ RCBit rc = LeaveRC); -+ void fcfid(const DwVfpRegister frt, const DwVfpRegister frb, -+ RCBit rc = LeaveRC); -+ void fctid(const DwVfpRegister frt, const DwVfpRegister frb, -+ RCBit rc = LeaveRC); -+ void fctidz(const DwVfpRegister frt, const DwVfpRegister frb, -+ RCBit rc = LeaveRC); -+ void fsel(const DwVfpRegister frt, const DwVfpRegister fra, -+ const DwVfpRegister frc, const DwVfpRegister frb, -+ RCBit rc = LeaveRC); -+ void fneg(const DwVfpRegister frt, const 
DwVfpRegister frb, -+ RCBit rc = LeaveRC); -+ void mtfsfi(int bf, int immediate, RCBit rc = LeaveRC); -+ void mffs(const DwVfpRegister frt, RCBit rc = LeaveRC); -+ void mtfsf(const DwVfpRegister frb, bool L = 1, int FLM = 0, bool W = 0, -+ RCBit rc = LeaveRC); -+ void fsqrt(const DwVfpRegister frt, const DwVfpRegister frb, -+ RCBit rc = LeaveRC); -+ void fabs(const DwVfpRegister frt, const DwVfpRegister frb, -+ RCBit rc = LeaveRC); -+ -+ // Pseudo instructions -+ -+ // Different nop operations are used by the code generator to detect certain -+ // states of the generated code. -+ enum NopMarkerTypes { -+ NON_MARKING_NOP = 0, -+ DEBUG_BREAK_NOP, -+ // IC markers. -+ PROPERTY_ACCESS_INLINED, -+ PROPERTY_ACCESS_INLINED_CONTEXT, -+ PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE, -+ // Helper values. -+ LAST_CODE_MARKER, -+ FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED -+ }; -+ -+ void nop(int type = 0); // 0 is the default non-marking type. -+ -+ void push(Register src) { -+#if V8_TARGET_ARCH_PPC64 -+ stdu(src, MemOperand(sp, -8)); -+#else -+ stwu(src, MemOperand(sp, -4)); -+#endif -+ } -+ -+ void pop(Register dst) { -+#if V8_TARGET_ARCH_PPC64 -+ ld(dst, MemOperand(sp)); -+ addi(sp, sp, Operand(8)); -+#else -+ lwz(dst, MemOperand(sp)); -+ addi(sp, sp, Operand(4)); -+#endif -+ } -+ -+ void pop() { -+ addi(sp, sp, Operand(kPointerSize)); -+ } -+ -+ // Jump unconditionally to given label. -+ void jmp(Label* L) { b(L); } -+ -+ bool predictable_code_size() const { return predictable_code_size_; } -+ -+ // Check the code size generated from label to here. -+ int SizeOfCodeGeneratedSince(Label* label) { -+ return pc_offset() - label->pos(); -+ } -+ -+ // Check the number of instructions generated from label to here. -+ int InstructionsGeneratedSince(Label* label) { -+ return SizeOfCodeGeneratedSince(label) / kInstrSize; -+ } -+ -+ // Class for scoping postponing the trampoline pool generation. -+ class BlockTrampolinePoolScope { -+ public: -+ explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) { -+ assem_->StartBlockTrampolinePool(); -+ } -+ ~BlockTrampolinePoolScope() { -+ assem_->EndBlockTrampolinePool(); -+ } -+ -+ private: -+ Assembler* assem_; -+ -+ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope); -+ }; -+ -+ // Debugging -+ -+ // Mark address of the ExitJSFrame code. -+ void RecordJSReturn(); -+ -+ // Mark address of a debug break slot. -+ void RecordDebugBreakSlot(); -+ -+ // Record the AST id of the CallIC being compiled, so that it can be placed -+ // in the relocation information. -+ void SetRecordedAstId(TypeFeedbackId ast_id) { -+// PPC - this shouldn't be failing roohack ASSERT(recorded_ast_id_.IsNone()); -+ recorded_ast_id_ = ast_id; -+ } -+ -+ TypeFeedbackId RecordedAstId() { -+ // roohack - another issue??? ASSERT(!recorded_ast_id_.IsNone()); -+ return recorded_ast_id_; -+ } -+ -+ void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); } -+ -+ // Record a comment relocation entry that can be used by a disassembler. -+ // Use --code-comments to enable. -+ void RecordComment(const char* msg); -+ -+ // Writes a single byte or word of data in the code stream. Used -+ // for inline tables, e.g., jump-tables. 
-+ void db(uint8_t data); -+ void dd(uint32_t data); -+ -+ int pc_offset() const { return pc_ - buffer_; } -+ -+ PositionsRecorder* positions_recorder() { return &positions_recorder_; } -+ -+ // Read/patch instructions -+ Instr instr_at(int pos) { return *reinterpret_cast(buffer_ + pos); } -+ void instr_at_put(int pos, Instr instr) { -+ *reinterpret_cast(buffer_ + pos) = instr; -+ } -+ static Instr instr_at(byte* pc) { return *reinterpret_cast(pc); } -+ static void instr_at_put(byte* pc, Instr instr) { -+ *reinterpret_cast(pc) = instr; -+ } -+ static Condition GetCondition(Instr instr); -+ -+ static bool IsLis(Instr instr); -+ static bool IsAddic(Instr instr); -+ static bool IsOri(Instr instr); -+ -+ static bool IsBranch(Instr instr); -+ static Register GetRA(Instr instr); -+ static Register GetRB(Instr instr); -+#if V8_TARGET_ARCH_PPC64 -+ static bool Is64BitLoadIntoR12(Instr instr1, Instr instr2, -+ Instr instr3, Instr instr4, Instr instr5); -+#else -+ static bool Is32BitLoadIntoR12(Instr instr1, Instr instr2); -+#endif -+ -+ static bool IsCmpRegister(Instr instr); -+ static bool IsCmpImmediate(Instr instr); -+ static bool IsRlwinm(Instr instr); -+#if V8_TARGET_ARCH_PPC64 -+ static bool IsRldicl(Instr instr); -+#endif -+ static Register GetCmpImmediateRegister(Instr instr); -+ static int GetCmpImmediateRawImmediate(Instr instr); -+ static bool IsNop(Instr instr, int type = NON_MARKING_NOP); -+ -+ // Postpone the generation of the trampoline pool for the specified number of -+ // instructions. -+ void BlockTrampolinePoolFor(int instructions); -+ void CheckTrampolinePool(); -+ -+ protected: -+ // Relocation for a type-recording IC has the AST id added to it. This -+ // member variable is a way to pass the information from the call site to -+ // the relocation info. -+ TypeFeedbackId recorded_ast_id_; -+ -+ bool emit_debug_code() const { return emit_debug_code_; } -+ -+ int buffer_space() const { return reloc_info_writer.pos() - pc_; } -+ -+ // Decode branch instruction at pos and return branch target pos -+ int target_at(int pos); -+ -+ // Patch branch instruction at pos to branch to given branch target pos -+ void target_at_put(int pos, int target_pos); -+ -+ // Record reloc info for current pc_ -+ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0); -+ -+ // Block the emission of the trampoline pool before pc_offset. -+ void BlockTrampolinePoolBefore(int pc_offset) { -+ if (no_trampoline_pool_before_ < pc_offset) -+ no_trampoline_pool_before_ = pc_offset; -+ } -+ -+ void StartBlockTrampolinePool() { -+ trampoline_pool_blocked_nesting_++; -+ } -+ -+ void EndBlockTrampolinePool() { -+ trampoline_pool_blocked_nesting_--; -+ } -+ -+ bool is_trampoline_pool_blocked() const { -+ return trampoline_pool_blocked_nesting_ > 0; -+ } -+ -+ bool has_exception() const { -+ return internal_trampoline_exception_; -+ } -+ -+ bool is_trampoline_emitted() const { -+ return trampoline_emitted_; -+ } -+ -+ -+ private: -+ // Code buffer: -+ // The buffer into which code and relocation info are generated. -+ byte* buffer_; -+ int buffer_size_; -+ // True if the assembler owns the buffer, false if buffer is external. -+ bool own_buffer_; -+ -+ // Code generation -+ // The relocation writer's position is at least kGap bytes below the end of -+ // the generated instructions. This is so that multi-instruction sequences do -+ // not have to check for overflow. The same is true for writes of large -+ // relocation info entries. 
-+ static const int kGap = 32; -+ byte* pc_; // the program counter; moves forward -+ -+ // Repeated checking whether the trampoline pool should be emitted is rather -+ // expensive. By default we only check again once a number of instructions -+ // has been generated. -+ int next_buffer_check_; // pc offset of next buffer check. -+ -+ // Emission of the trampoline pool may be blocked in some code sequences. -+ int trampoline_pool_blocked_nesting_; // Block emission if this is not zero. -+ int no_trampoline_pool_before_; // Block emission before this pc offset. -+ -+ // Relocation info generation -+ // Each relocation is encoded as a variable size value -+ static const int kMaxRelocSize = RelocInfoWriter::kMaxSize; -+ RelocInfoWriter reloc_info_writer; -+ -+ // The bound position, before this we cannot do instruction elimination. -+ int last_bound_pos_; -+ -+ // Code emission -+ inline void CheckBuffer(); -+ void GrowBuffer(); -+ inline void emit(Instr x); -+ inline void CheckTrampolinePoolQuick(); -+ -+ // Instruction generation -+ void a_form(Instr instr, DwVfpRegister frt, DwVfpRegister fra, -+ DwVfpRegister frb, RCBit r); -+ void d_form(Instr instr, Register rt, Register ra, const intptr_t val, -+ bool signed_disp); -+ void x_form(Instr instr, Register ra, Register rs, Register rb, RCBit r); -+ void xo_form(Instr instr, Register rt, Register ra, Register rb, -+ OEBit o, RCBit r); -+ void md_form(Instr instr, Register ra, Register rs, int shift, int maskbit, -+ RCBit r); -+ -+ // Labels -+ void print(Label* L); -+ int max_reach_from(int pos); -+ void bind_to(Label* L, int pos); -+ void next(Label* L); -+ -+ class Trampoline { -+ public: -+ Trampoline() { -+ next_slot_ = 0; -+ free_slot_count_ = 0; -+ } -+ Trampoline(int start, int slot_count) { -+ next_slot_ = start; -+ free_slot_count_ = slot_count; -+ } -+ int take_slot() { -+ int trampoline_slot = kInvalidSlotPos; -+ if (free_slot_count_ <= 0) { -+ // We have run out of space on trampolines. -+ // Make sure we fail in debug mode, so we become aware of each case -+ // when this happens. -+ ASSERT(0); -+ // Internal exception will be caught. -+ } else { -+ trampoline_slot = next_slot_; -+ free_slot_count_--; -+ next_slot_ += kTrampolineSlotsSize; -+ } -+ return trampoline_slot; -+ } -+ -+ private: -+ int next_slot_; -+ int free_slot_count_; -+ }; -+ -+ int32_t get_trampoline_entry(); -+ int unbound_labels_count_; -+ // If trampoline is emitted, generated code is becoming large. As -+ // this is already a slow case which can possibly break our code -+ // generation for the extreme case, we use this information to -+ // trigger different mode of branch instruction generation, where we -+ // no longer use a single branch instruction. 
-+ bool trampoline_emitted_; -+ static const int kTrampolineSlotsSize = kInstrSize; -+ static const int kMaxCondBranchReach = (1 << (16 - 1)) - 1; -+ static const int kMaxBlockTrampolineSectionSize = 64 * kInstrSize; -+ static const int kInvalidSlotPos = -1; -+ -+ Trampoline trampoline_; -+ bool internal_trampoline_exception_; -+ -+ friend class RegExpMacroAssemblerPPC; -+ friend class RelocInfo; -+ friend class CodePatcher; -+ friend class BlockTrampolinePoolScope; -+ -+ PositionsRecorder positions_recorder_; -+ -+ bool emit_debug_code_; -+ bool predictable_code_size_; -+ -+ friend class PositionsRecorder; -+ friend class EnsureSpace; -+}; -+ -+ -+class EnsureSpace BASE_EMBEDDED { -+ public: -+ explicit EnsureSpace(Assembler* assembler) { -+ assembler->CheckBuffer(); -+ } -+}; -+ -+} } // namespace v8::internal -+ -+#endif // V8_PPC_ASSEMBLER_PPC_H_ -diff -up v8-3.14.5.10/src/ppc/assembler-ppc-inl.h.ppc v8-3.14.5.10/src/ppc/assembler-ppc-inl.h ---- v8-3.14.5.10/src/ppc/assembler-ppc-inl.h.ppc 2016-06-07 14:15:45.984393044 -0400 -+++ v8-3.14.5.10/src/ppc/assembler-ppc-inl.h 2016-06-07 14:15:45.984393044 -0400 -@@ -0,0 +1,457 @@ -+// Copyright (c) 1994-2006 Sun Microsystems Inc. -+// All Rights Reserved. -+// -+// Redistribution and use in source and binary forms, with or without -+// modification, are permitted provided that the following conditions -+// are met: -+// -+// - Redistributions of source code must retain the above copyright notice, -+// this list of conditions and the following disclaimer. -+// -+// - Redistribution in binary form must reproduce the above copyright -+// notice, this list of conditions and the following disclaimer in the -+// documentation and/or other materials provided with the -+// distribution. -+// -+// - Neither the name of Sun Microsystems or the names of contributors may -+// be used to endorse or promote products derived from this software without -+// specific prior written permission. -+// -+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, -+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED -+// OF THE POSSIBILITY OF SUCH DAMAGE. -+ -+// The original source code covered by the above license above has been modified -+// significantly by Google Inc. -+// Copyright 2012 the V8 project authors. All rights reserved. -+ -+// -+// Copyright IBM Corp. 2012, 2013. All rights reserved. 
-+// -+ -+#ifndef V8_PPC_ASSEMBLER_PPC_INL_H_ -+#define V8_PPC_ASSEMBLER_PPC_INL_H_ -+ -+#include "ppc/assembler-ppc.h" -+ -+#include "cpu.h" -+#include "debug.h" -+ -+ -+namespace v8 { -+namespace internal { -+ -+ -+int DwVfpRegister::ToAllocationIndex(DwVfpRegister reg) { -+ int index = reg.code() - 1; // d0 is skipped -+ ASSERT(index < kNumAllocatableRegisters); -+ ASSERT(!reg.is(kDoubleRegZero)); -+ ASSERT(!reg.is(kScratchDoubleReg)); -+ return index; -+} -+ -+void RelocInfo::apply(intptr_t delta) { -+ if (RelocInfo::IsInternalReference(rmode_)) { -+ // absolute code pointer inside code object moves with the code object. -+ intptr_t* p = reinterpret_cast(pc_); -+ *p += delta; // relocate entry -+ CPU::FlushICache(p, sizeof(uintptr_t)); -+ } -+ // We do not use pc relative addressing on PPC, so there is -+ // nothing else to do. -+} -+ -+ -+Address RelocInfo::target_address() { -+ ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY); -+ return Assembler::target_address_at(pc_); -+} -+ -+ -+Address RelocInfo::target_address_address() { -+ ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY -+ || rmode_ == EMBEDDED_OBJECT -+ || rmode_ == EXTERNAL_REFERENCE); -+ -+ // Read the address of the word containing the target_address in an -+ // instruction stream. -+ // The only architecture-independent user of this function is the serializer. -+ // The serializer uses it to find out how many raw bytes of instruction to -+ // output before the next target. -+ // For an instruction like LIS/ADDIC where the target bits are mixed into the -+ // instruction bits, the size of the target will be zero, indicating that the -+ // serializer should not step forward in memory after a target is resolved -+ // and written. In this case the target_address_address function should -+ // return the end of the instructions to be patched, allowing the -+ // deserializer to deserialize the instructions as raw bytes and put them in -+ // place, ready to be patched with the target. -+ -+ return reinterpret_cast
<Address>(
-+ pc_ + (Assembler::kInstructionsForPtrConstant *
-+ Assembler::kInstrSize));
-+}
-+
-+
-+int RelocInfo::target_address_size() {
-+ return Assembler::kSpecialTargetSize;
-+}
-+
-+
-+void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
-+ ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
-+ Assembler::set_target_address_at(pc_, target);
-+ if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
-+ Object* target_code = Code::GetCodeFromTargetAddress(target);
-+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
-+ host(), this, HeapObject::cast(target_code));
-+ }
-+}
-+
-+
-+Address Assembler::target_address_from_return_address(Address pc) {
-+ return pc - kCallTargetAddressOffset;
-+}
-+
-+
-+Object* RelocInfo::target_object() {
-+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-+ return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
-+}
-+
-+
-+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
-+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-+ return Handle<Object>(reinterpret_cast<Object**>(
-+ Assembler::target_address_at(pc_)));
-+}
-+
-+
-+Object** RelocInfo::target_object_address() {
-+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-+ reconstructed_obj_ptr_ =
-+ reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
-+ return &reconstructed_obj_ptr_;
-+}
-+
-+
-+void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
-+ ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-+ Assembler::set_target_address_at(pc_, reinterpret_cast<Address>
(target));
-+ if (mode == UPDATE_WRITE_BARRIER &&
-+ host() != NULL &&
-+ target->IsHeapObject()) {
-+ host()->GetHeap()->incremental_marking()->RecordWrite(
-+ host(), &Memory::Object_at(pc_), HeapObject::cast(target));
-+ }
-+}
-+
-+
-+Address* RelocInfo::target_reference_address() {
-+ ASSERT(rmode_ == EXTERNAL_REFERENCE);
-+ reconstructed_adr_ptr_ = Assembler::target_address_at(pc_);
-+ return &reconstructed_adr_ptr_;
-+}
-+
-+
-+Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
-+ ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
-+ Address address = Memory::Address_at(pc_);
-+ return Handle<JSGlobalPropertyCell>(
-+ reinterpret_cast<JSGlobalPropertyCell**>(address));
-+}
-+
-+
-+JSGlobalPropertyCell* RelocInfo::target_cell() {
-+ ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
-+ return JSGlobalPropertyCell::FromValueAddress(Memory::Address_at(pc_));
-+}
-+
-+
-+void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
-+ WriteBarrierMode mode) {
-+ ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
-+ Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
-+ Memory::Address_at(pc_) = address;
-+ if (mode == UPDATE_WRITE_BARRIER && host() != NULL) {
-+ // TODO(1550) We are passing NULL as a slot because cell can never be on
-+ // evacuation candidate.
-+ host()->GetHeap()->incremental_marking()->RecordWrite(
-+ host(), NULL, cell);
-+ }
-+}
-+
-+
-+Address RelocInfo::call_address() {
-+ ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
-+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
-+ // The pc_ offset of 0 assumes patched return sequence per
-+ // BreakLocationIterator::SetDebugBreakAtReturn(), or debug break
-+ // slot per BreakLocationIterator::SetDebugBreakAtSlot().
-+ return Assembler::target_address_at(pc_);
-+}
-+
-+
-+void RelocInfo::set_call_address(Address target) {
-+ ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
-+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
-+ Assembler::set_target_address_at(pc_, target);
-+ if (host() != NULL) {
-+ Object* target_code = Code::GetCodeFromTargetAddress(target);
-+ host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
-+ host(), this, HeapObject::cast(target_code));
-+ }
-+}
-+
-+
-+Object* RelocInfo::call_object() {
-+ return *call_object_address();
-+}
-+
-+
-+void RelocInfo::set_call_object(Object* target) {
-+ *call_object_address() = target;
-+}
-+
-+
-+Object** RelocInfo::call_object_address() {
-+ ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
-+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
-+ return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
-+}
-+
-+
-+bool RelocInfo::IsPatchedReturnSequence() {
-+ //
-+ // The patched return sequence is defined by
-+ // BreakLocationIterator::SetDebugBreakAtReturn()
-+ // FIXED_SEQUENCE
-+
-+ Instr instr0 = Assembler::instr_at(pc_);
-+ Instr instr1 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize);
-+#if V8_TARGET_ARCH_PPC64
-+ Instr instr3 = Assembler::instr_at(pc_ + (3 * Assembler::kInstrSize));
-+ Instr instr4 = Assembler::instr_at(pc_ + (4 * Assembler::kInstrSize));
-+ Instr binstr = Assembler::instr_at(pc_ + (7 * Assembler::kInstrSize));
-+#else
-+ Instr binstr = Assembler::instr_at(pc_ + 4 * Assembler::kInstrSize);
-+#endif
-+ bool patched_return = ((instr0 & kOpcodeMask) == ADDIS &&
-+ (instr1 & kOpcodeMask) == ORI &&
-+#if V8_TARGET_ARCH_PPC64
-+ (instr3 & kOpcodeMask) == ORIS &&
-+ (instr4 & kOpcodeMask) == ORI &&
-+#endif
-+ (binstr == 0x7d821008)); // twge r2, r2
-+
-+//
printf("IsPatchedReturnSequence: %d\n", patched_return); -+ return patched_return; -+} -+ -+ -+bool RelocInfo::IsPatchedDebugBreakSlotSequence() { -+ Instr current_instr = Assembler::instr_at(pc_); -+ return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP); -+} -+ -+ -+void RelocInfo::Visit(ObjectVisitor* visitor) { -+ RelocInfo::Mode mode = rmode(); -+ if (mode == RelocInfo::EMBEDDED_OBJECT) { -+ visitor->VisitEmbeddedPointer(this); -+ } else if (RelocInfo::IsCodeTarget(mode)) { -+ visitor->VisitCodeTarget(this); -+ } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) { -+ visitor->VisitGlobalPropertyCell(this); -+ } else if (mode == RelocInfo::EXTERNAL_REFERENCE) { -+ visitor->VisitExternalReference(this); -+#ifdef ENABLE_DEBUGGER_SUPPORT -+ // TODO(isolates): Get a cached isolate below. -+ } else if (((RelocInfo::IsJSReturn(mode) && -+ IsPatchedReturnSequence()) || -+ (RelocInfo::IsDebugBreakSlot(mode) && -+ IsPatchedDebugBreakSlotSequence())) && -+ Isolate::Current()->debug()->has_break_points()) { -+ visitor->VisitDebugTarget(this); -+#endif -+ } else if (mode == RelocInfo::RUNTIME_ENTRY) { -+ visitor->VisitRuntimeEntry(this); -+ } -+} -+ -+ -+template -+void RelocInfo::Visit(Heap* heap) { -+ RelocInfo::Mode mode = rmode(); -+ if (mode == RelocInfo::EMBEDDED_OBJECT) { -+ StaticVisitor::VisitEmbeddedPointer(heap, this); -+ } else if (RelocInfo::IsCodeTarget(mode)) { -+ StaticVisitor::VisitCodeTarget(heap, this); -+ } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) { -+ StaticVisitor::VisitGlobalPropertyCell(heap, this); -+ } else if (mode == RelocInfo::EXTERNAL_REFERENCE) { -+ StaticVisitor::VisitExternalReference(this); -+#ifdef ENABLE_DEBUGGER_SUPPORT -+ } else if (heap->isolate()->debug()->has_break_points() && -+ ((RelocInfo::IsJSReturn(mode) && -+ IsPatchedReturnSequence()) || -+ (RelocInfo::IsDebugBreakSlot(mode) && -+ IsPatchedDebugBreakSlotSequence()))) { -+ StaticVisitor::VisitDebugTarget(heap, this); -+#endif -+ } else if (mode == RelocInfo::RUNTIME_ENTRY) { -+ StaticVisitor::VisitRuntimeEntry(this); -+ } -+} -+ -+Operand::Operand(intptr_t immediate, RelocInfo::Mode rmode) { -+ rm_ = no_reg; -+ imm_ = immediate; -+ rmode_ = rmode; -+} -+ -+Operand::Operand(const ExternalReference& f) { -+ rm_ = no_reg; -+ imm_ = reinterpret_cast(f.address()); -+ rmode_ = RelocInfo::EXTERNAL_REFERENCE; -+} -+ -+Operand::Operand(Smi* value) { -+ rm_ = no_reg; -+ imm_ = reinterpret_cast(value); -+ rmode_ = RelocInfo::NONE; -+} -+ -+Operand::Operand(Register rm) { -+ rm_ = rm; -+ rmode_ = RelocInfo::NONE; // PPC -why doesn't ARM do this? -+} -+ -+void Assembler::CheckBuffer() { -+ if (buffer_space() <= kGap) { -+ GrowBuffer(); -+ } -+} -+ -+void Assembler::CheckTrampolinePoolQuick() { -+ if (pc_offset() >= next_buffer_check_) { -+ CheckTrampolinePool(); -+ } -+} -+ -+void Assembler::emit(Instr x) { -+ CheckBuffer(); -+ *reinterpret_cast(pc_) = x; -+ pc_ += kInstrSize; -+ CheckTrampolinePoolQuick(); -+} -+ -+bool Operand::is_reg() const { -+ return rm_.is_valid(); -+} -+ -+ -+// Fetch the 32bit value from the FIXED_SEQUENCE lis/ori -+Address Assembler::target_address_at(Address pc) { -+ Instr instr1 = instr_at(pc); -+ Instr instr2 = instr_at(pc + kInstrSize); -+#if V8_TARGET_ARCH_PPC64 -+ Instr instr4 = instr_at(pc + (3*kInstrSize)); -+ Instr instr5 = instr_at(pc + (4*kInstrSize)); -+#endif -+ // Interpret 2 instructions generated by lis/ori -+ if (IsLis(instr1) && IsOri(instr2)) { -+#if V8_TARGET_ARCH_PPC64 -+ // Assemble the 64 bit value. 
-+ uint64_t hi = (static_cast<uint64_t>((instr1 & kImm16Mask) << 16) |
-+ static_cast<uint64_t>(instr2 & kImm16Mask));
-+ uint64_t lo = (static_cast<uint64_t>((instr4 & kImm16Mask) << 16) |
-+ static_cast<uint64_t>(instr5 & kImm16Mask));
-+ return reinterpret_cast<Address>((hi << 32) | lo);
-+#else
-+ // Assemble the 32 bit value.
-+ return reinterpret_cast<Address>
( -+ ((instr1 & kImm16Mask) << 16) | (instr2 & kImm16Mask)); -+#endif -+ } -+ -+ PPCPORT_UNIMPLEMENTED(); -+ return (Address)0; -+} -+ -+ -+// This sets the branch destination (which gets loaded at the call address). -+// This is for calls and branches within generated code. The serializer -+// has already deserialized the lis/ori instructions etc. -+// There is a FIXED_SEQUENCE assumption here -+void Assembler::deserialization_set_special_target_at( -+ Address instruction_payload, Address target) { -+ set_target_address_at( -+ instruction_payload - kInstructionsForPtrConstant * kInstrSize, -+ target); -+} -+ -+// This code assumes the FIXED_SEQUENCE of lis/ori -+void Assembler::set_target_address_at(Address pc, Address target) { -+ Instr instr1 = instr_at(pc); -+ Instr instr2 = instr_at(pc + kInstrSize); -+ // Interpret 2 instructions generated by lis/ori -+ if (IsLis(instr1) && IsOri(instr2)) { -+#if V8_TARGET_ARCH_PPC64 -+ Instr instr4 = instr_at(pc + (3*kInstrSize)); -+ Instr instr5 = instr_at(pc + (4*kInstrSize)); -+ // Needs to be fixed up when mov changes to handle 64-bit values. -+ uint32_t* p = reinterpret_cast(pc); -+ uintptr_t itarget = reinterpret_cast(target); -+ -+ instr5 &= ~kImm16Mask; -+ instr5 |= itarget & kImm16Mask; -+ itarget = itarget >> 16; -+ -+ instr4 &= ~kImm16Mask; -+ instr4 |= itarget & kImm16Mask; -+ itarget = itarget >> 16; -+ -+ instr2 &= ~kImm16Mask; -+ instr2 |= itarget & kImm16Mask; -+ itarget = itarget >> 16; -+ -+ instr1 &= ~kImm16Mask; -+ instr1 |= itarget & kImm16Mask; -+ itarget = itarget >> 16; -+ -+ *p = instr1; -+ *(p+1) = instr2; -+ *(p+3) = instr4; -+ *(p+4) = instr5; -+ CPU::FlushICache(p, 20); -+#else -+ uint32_t* p = reinterpret_cast(pc); -+ uint32_t itarget = reinterpret_cast(target); -+ int lo_word = itarget & kImm16Mask; -+ int hi_word = itarget >> 16; -+ instr1 &= ~kImm16Mask; -+ instr1 |= hi_word; -+ instr2 &= ~kImm16Mask; -+ instr2 |= lo_word; -+ -+ *p = instr1; -+ *(p+1) = instr2; -+ CPU::FlushICache(p, 8); -+#endif -+ } else { -+ UNREACHABLE(); -+ } -+} -+ -+} } // namespace v8::internal -+ -+#endif // V8_PPC_ASSEMBLER_PPC_INL_H_ -diff -up v8-3.14.5.10/src/ppc/builtins-ppc.cc.ppc v8-3.14.5.10/src/ppc/builtins-ppc.cc ---- v8-3.14.5.10/src/ppc/builtins-ppc.cc.ppc 2016-06-07 14:15:45.986393032 -0400 -+++ v8-3.14.5.10/src/ppc/builtins-ppc.cc 2016-06-07 14:15:45.986393032 -0400 -@@ -0,0 +1,1910 @@ -+// Copyright 2012 the V8 project authors. All rights reserved. -+// -+// Copyright IBM Corp. 2012, 2013. All rights reserved. -+// -+// Redistribution and use in source and binary forms, with or without -+// modification, are permitted provided that the following conditions are -+// met: -+// -+// * Redistributions of source code must retain the above copyright -+// notice, this list of conditions and the following disclaimer. -+// * Redistributions in binary form must reproduce the above -+// copyright notice, this list of conditions and the following -+// disclaimer in the documentation and/or other materials provided -+// with the distribution. -+// * Neither the name of Google Inc. nor the names of its -+// contributors may be used to endorse or promote products derived -+// from this software without specific prior written permission. -+// -+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -+// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ -+#include "v8.h" -+ -+#if defined(V8_TARGET_ARCH_PPC) -+ -+#include "codegen.h" -+#include "debug.h" -+#include "deoptimizer.h" -+#include "full-codegen.h" -+#include "runtime.h" -+ -+namespace v8 { -+namespace internal { -+ -+ -+#define __ ACCESS_MASM(masm) -+ -+void Builtins::Generate_Adaptor(MacroAssembler* masm, -+ CFunctionId id, -+ BuiltinExtraArguments extra_args) { -+ // ----------- S t a t e ------------- -+ // -- r3 : number of arguments excluding receiver -+ // -- r4 : called function (only guaranteed when -+ // extra_args requires it) -+ // -- cp : context -+ // -- sp[0] : last argument -+ // -- ... -+ // -- sp[4 * (argc - 1)] : first argument (argc == r3) -+ // -- sp[4 * argc] : receiver -+ // ----------------------------------- -+ -+ // Insert extra arguments. -+ int num_extra_args = 0; -+ if (extra_args == NEEDS_CALLED_FUNCTION) { -+ num_extra_args = 1; -+ __ push(r4); -+ } else { -+ ASSERT(extra_args == NO_EXTRA_ARGUMENTS); -+ } -+ -+ // JumpToExternalReference expects r3 to contain the number of arguments -+ // including the receiver and the extra arguments. -+ __ addi(r3, r3, Operand(num_extra_args + 1)); -+ __ JumpToExternalReference(ExternalReference(id, masm->isolate())); -+} -+ -+ -+// Load the built-in InternalArray function from the current context. -+static void GenerateLoadInternalArrayFunction(MacroAssembler* masm, -+ Register result) { -+ // Load the native context. -+ -+ __ LoadP(result, -+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); -+ __ LoadP(result, -+ FieldMemOperand(result, GlobalObject::kNativeContextOffset)); -+ // Load the InternalArray function from the native context. -+ __ LoadP(result, -+ MemOperand(result, -+ Context::SlotOffset( -+ Context::INTERNAL_ARRAY_FUNCTION_INDEX))); -+} -+ -+ -+// Load the built-in Array function from the current context. -+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) { -+ // Load the native context. -+ -+ __ LoadP(result, -+ MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); -+ __ LoadP(result, -+ FieldMemOperand(result, GlobalObject::kNativeContextOffset)); -+ // Load the Array function from the native context. -+ __ LoadP(result, -+ MemOperand(result, -+ Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX))); -+} -+ -+ -+// Allocate an empty JSArray. The allocated array is put into the result -+// register. An elements backing store is allocated with size initial_capacity -+// and filled with the hole values. -+static void AllocateEmptyJSArray(MacroAssembler* masm, -+ Register array_function, -+ Register result, -+ Register scratch1, -+ Register scratch2, -+ Register scratch3, -+ Label* gc_required) { -+ const int initial_capacity = JSArray::kPreallocatedArrayElements; -+ STATIC_ASSERT(initial_capacity >= 0); -+ __ LoadInitialArrayMap(array_function, scratch2, scratch1, false); -+ -+ // Allocate the JSArray object together with space for a fixed array with the -+ // requested elements. 
-+ int size = JSArray::kSize; -+ if (initial_capacity > 0) { -+ size += FixedArray::SizeFor(initial_capacity); -+ } -+ __ AllocateInNewSpace(size, -+ result, -+ scratch2, -+ scratch3, -+ gc_required, -+ TAG_OBJECT); -+ -+ // Future optimization: defer tagging the result pointer for more -+ // efficient 64-bit memory accesses (due to alignment requirements -+ // on the memoperand offset). -+ -+ // Allocated the JSArray. Now initialize the fields except for the elements -+ // array. -+ // result: JSObject -+ // scratch1: initial map -+ // scratch2: start of next object -+ __ StoreP(scratch1, FieldMemOperand(result, JSObject::kMapOffset), r0); -+ __ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex); -+ __ StoreP(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset), r0); -+ // Field JSArray::kElementsOffset is initialized later. -+ __ li(scratch3, Operand(0, RelocInfo::NONE)); -+ __ StoreP(scratch3, FieldMemOperand(result, JSArray::kLengthOffset), r0); -+ -+ if (initial_capacity == 0) { -+ __ StoreP(scratch1, FieldMemOperand(result, JSArray::kElementsOffset), r0); -+ return; -+ } -+ -+ // Calculate the location of the elements array and set elements array member -+ // of the JSArray. -+ // result: JSObject -+ // scratch2: start of next object -+ __ addi(scratch1, result, Operand(JSArray::kSize)); -+ __ StoreP(scratch1, FieldMemOperand(result, JSArray::kElementsOffset), r0); -+ -+ // Clear the heap tag on the elements array. -+ __ subi(scratch1, scratch1, Operand(kHeapObjectTag)); -+ -+ // Initialize the FixedArray and fill it with holes. FixedArray length is -+ // stored as a smi. -+ // result: JSObject -+ // scratch1: elements array (untagged) -+ // scratch2: start of next object -+ __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex); -+ STATIC_ASSERT(0 * kPointerSize == FixedArray::kMapOffset); -+ __ StoreP(scratch3, MemOperand(scratch1)); -+ __ addi(scratch1, scratch1, Operand(kPointerSize)); -+ __ LoadSmiLiteral(scratch3, Smi::FromInt(initial_capacity)); -+ STATIC_ASSERT(1 * kPointerSize == FixedArray::kLengthOffset); -+ __ StoreP(scratch3, MemOperand(scratch1)); -+ __ addi(scratch1, scratch1, Operand(kPointerSize)); -+ -+ // Fill the FixedArray with the hole value. Inline the code if short. -+ STATIC_ASSERT(2 * kPointerSize == FixedArray::kHeaderSize); -+ __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex); -+ static const int kLoopUnfoldLimit = 4; -+ if (initial_capacity <= kLoopUnfoldLimit) { -+ for (int i = 0; i < initial_capacity; i++) { -+ __ StoreP(scratch3, MemOperand(scratch1)); -+ __ addi(scratch1, scratch1, Operand(kPointerSize)); -+ } -+ } else { -+ Label loop, entry; -+ __ addi(scratch2, scratch1, Operand(initial_capacity * kPointerSize)); -+ __ b(&entry); -+ __ bind(&loop); -+ __ StoreP(scratch3, MemOperand(scratch1)); -+ __ addi(scratch1, scratch1, Operand(kPointerSize)); -+ __ bind(&entry); -+ __ cmp(scratch1, scratch2); -+ __ blt(&loop); -+ } -+} -+ -+// Allocate a JSArray with the number of elements stored in a register. The -+// register array_function holds the built-in Array function and the register -+// array_size holds the size of the array as a smi. The allocated array is put -+// into the result register and beginning and end of the FixedArray elements -+// storage is put into registers elements_array_storage and elements_array_end -+// (see below for when that is not the case). If the parameter fill_with_holes -+// is true the allocated elements backing store is filled with the hole values -+// otherwise it is left uninitialized. 
When the backing store is filled the -+// register elements_array_storage is scratched. -+static void AllocateJSArray(MacroAssembler* masm, -+ Register array_function, // Array function. -+ Register array_size, // As a smi, cannot be 0. -+ Register result, -+ Register elements_array_storage, -+ Register elements_array_end, -+ Register scratch1, -+ Register scratch2, -+ bool fill_with_hole, -+ Label* gc_required) { -+ // Load the initial map from the array function. -+ __ LoadInitialArrayMap(array_function, scratch2, -+ elements_array_storage, fill_with_hole); -+ -+ if (FLAG_debug_code) { // Assert that array size is not zero. -+ __ cmpi(array_size, Operand::Zero()); -+ __ Assert(ne, "array size is unexpectedly 0"); -+ } -+ -+ // Allocate the JSArray object together with space for a FixedArray with the -+ // requested number of elements. We omit the TAG_OBJECT flag and defer -+ // tagging the pointer until the end so that we can more efficiently perform -+ // aligned memory accesses. -+ __ li(elements_array_end, -+ Operand((JSArray::kSize + FixedArray::kHeaderSize) / kPointerSize)); -+ __ SmiUntag(scratch1, array_size); -+ __ add(elements_array_end, elements_array_end, scratch1); -+ __ AllocateInNewSpace( -+ elements_array_end, -+ result, -+ scratch1, -+ scratch2, -+ gc_required, -+ static_cast<AllocationFlags>(SIZE_IN_WORDS)); -+ -+ // Allocated the JSArray. Now initialize the fields except for the elements -+ // array. -+ // result: JSObject (untagged) -+ // elements_array_storage: initial map -+ // array_size: size of array (smi) -+ __ StoreP(elements_array_storage, MemOperand(result, JSObject::kMapOffset)); -+ __ LoadRoot(elements_array_storage, Heap::kEmptyFixedArrayRootIndex); -+ __ StoreP(elements_array_storage, -+ MemOperand(result, JSArray::kPropertiesOffset)); -+ // Field JSArray::kElementsOffset is initialized later. -+ __ StoreP(array_size, MemOperand(result, JSArray::kLengthOffset)); -+ -+ // Calculate the location of the elements array and set elements array member -+ // of the JSArray. -+ // result: JSObject (untagged) -+ // array_size: size of array (smi) -+ __ addi(elements_array_storage, result, -+ Operand(JSArray::kSize + kHeapObjectTag)); -+ __ StoreP(elements_array_storage, -+ MemOperand(result, JSArray::kElementsOffset)); -+ -+ // Clear the heap tag on the elements array. -+ STATIC_ASSERT(kSmiTag == 0); -+ __ subi(elements_array_storage, -+ elements_array_storage, -+ Operand(kHeapObjectTag)); -+ // Initialize the fixed array and fill it with holes. FixedArray length is -+ // stored as a smi. -+ // result: JSObject (untagged) -+ // elements_array_storage: elements array (untagged) -+ // array_size: size of array (smi) -+ __ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex); -+ ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset); -+ __ StoreP(scratch1, MemOperand(elements_array_storage)); -+ __ addi(elements_array_storage, elements_array_storage, -+ Operand(kPointerSize)); -+ STATIC_ASSERT(kSmiTag == 0); -+ ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset); -+ __ StoreP(array_size, MemOperand(elements_array_storage)); -+ __ addi(elements_array_storage, elements_array_storage, -+ Operand(kPointerSize)); -+ -+ // Calculate elements array and elements array end. 
-+ // result: JSObject (untagged) -+ // elements_array_storage: elements array element storage -+ // array_size: smi-tagged size of elements array -+ __ SmiToPtrArrayOffset(scratch1, array_size); -+ __ add(elements_array_end, elements_array_storage, scratch1); -+ -+ // Fill the allocated FixedArray with the hole value if requested. -+ // result: JSObject (untagged) -+ // elements_array_storage: elements array element storage -+ // elements_array_end: start of next object -+ if (fill_with_hole) { -+ Label loop, entry; -+ __ LoadRoot(scratch1, Heap::kTheHoleValueRootIndex); -+ __ b(&entry); -+ __ bind(&loop); -+ __ StoreP(scratch1, MemOperand(elements_array_storage)); -+ __ addi(elements_array_storage, elements_array_storage, -+ Operand(kPointerSize)); -+ __ bind(&entry); -+ __ cmp(elements_array_storage, elements_array_end); -+ __ blt(&loop); -+ } -+ -+ // Tag object -+ __ addi(result, result, Operand(kHeapObjectTag)); -+} -+ -+// Create a new array for the built-in Array function. This function allocates -+// the JSArray object and the FixedArray elements array and initializes these. -+// If the Array cannot be constructed in native code the runtime is called. This -+// function assumes the following state: -+// r3: argc -+// r4: constructor (built-in Array function) -+// lr: return address -+// sp[0]: last argument -+// This function is used for both construct and normal calls of Array. The only -+// difference between handling a construct call and a normal call is that for a -+// construct call the constructor function in r4 needs to be preserved for -+// entering the generic code. In both cases argc in r3 needs to be preserved. -+// Both registers are preserved by this code so no need to differentiate between -+// construct call and normal call. -+static void ArrayNativeCode(MacroAssembler* masm, -+ Label* call_generic_code) { -+ Counters* counters = masm->isolate()->counters(); -+ Label argc_one_or_more, argc_two_or_more, not_empty_array, empty_array, -+ has_non_smi_element, finish, cant_transition_map, not_double; -+ -+ // Check for array construction with zero arguments or one. -+ __ cmpi(r3, Operand(0, RelocInfo::NONE)); -+ __ bne(&argc_one_or_more); -+ -+ // Handle construction of an empty array. -+ __ bind(&empty_array); -+ AllocateEmptyJSArray(masm, -+ r4, -+ r5, -+ r6, -+ r7, -+ r8, -+ call_generic_code); -+ __ IncrementCounter(counters->array_function_native(), 1, r6, r7); -+ // Set up return value, remove receiver from stack and return. -+ __ mr(r3, r5); -+ __ addi(sp, sp, Operand(kPointerSize)); -+ __ blr(); -+ -+ // Check for one argument. Bail out if argument is not smi or if it is -+ // negative. -+ __ bind(&argc_one_or_more); -+ __ cmpi(r3, Operand(1)); -+ __ bne(&argc_two_or_more); -+ STATIC_ASSERT(kSmiTag == 0); -+ __ LoadP(r5, MemOperand(sp)); // Get the argument from the stack. -+ __ cmpi(r5, Operand::Zero()); -+ __ bne(&not_empty_array); -+ __ Drop(1); // Adjust stack. -+ __ li(r3, Operand::Zero()); // Treat this as a call with argc of zero. -+ __ b(&empty_array); -+ -+ __ bind(&not_empty_array); -+ __ TestIfPositiveSmi(r5, r6); -+ __ bne(call_generic_code, cr0); -+ -+ // Handle construction of an empty array of a certain size. Bail out if size -+ // is too large to actually allocate an elements array. 
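-+  // r5 still holds the smi-tagged requested length here; the comparison
-+  // below checks it against the fast-element limit before any allocation.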
-+ STATIC_ASSERT(kSmiTag == 0); -+ __ CmpSmiLiteral(r5, Smi::FromInt(JSObject::kInitialMaxFastElementArray), r0); -+ __ bge(call_generic_code); -+ -+ // r3: argc -+ // r4: constructor -+ // r5: array_size (smi) -+ // sp[0]: argument -+ AllocateJSArray(masm, -+ r4, -+ r5, -+ r6, -+ r7, -+ r8, -+ r9, -+ r10, -+ true, -+ call_generic_code); -+ __ IncrementCounter(counters->array_function_native(), 1, r5, r7); -+ // Set up return value, remove receiver and argument from stack and return. -+ __ mr(r3, r6); -+ __ addi(sp, sp, Operand(2 * kPointerSize)); -+ __ blr(); -+ -+ // Handle construction of an array from a list of arguments. -+ __ bind(&argc_two_or_more); -+ // Convert argc to a smi. -+ __ SmiTag(r5, r3); -+ -+ // r3: argc -+ // r4: constructor -+ // r5: array_size (smi) -+ // sp[0]: last argument -+ AllocateJSArray(masm, -+ r4, -+ r5, -+ r6, -+ r7, -+ r8, -+ r9, -+ r10, -+ false, -+ call_generic_code); -+ __ IncrementCounter(counters->array_function_native(), 1, r5, r9); -+ -+ // Fill arguments as array elements. Copy from the top of the stack (last -+ // element) to the array backing store filling it backwards. Note: -+ // elements_array_end points after the backing store therefore PreIndex is -+ // used when filling the backing store. -+ // r3: argc -+ // r6: JSArray -+ // r7: elements_array storage start (untagged) -+ // r8: elements_array_end (untagged) -+ // sp[0]: last argument -+ Label loop, entry; -+ __ mr(r10, sp); -+ __ b(&entry); -+ __ bind(&loop); -+ __ LoadP(r5, MemOperand(r10)); -+ __ addi(r10, r10, Operand(kPointerSize)); -+ if (FLAG_smi_only_arrays) { -+ __ JumpIfNotSmi(r5, &has_non_smi_element); -+ } -+ __ StorePU(r5, MemOperand(r8, -kPointerSize)); -+ __ bind(&entry); -+ __ cmp(r7, r8); -+ __ blt(&loop); -+ -+ __ bind(&finish); -+ __ mr(sp, r10); -+ -+ // Remove caller arguments and receiver from the stack, set up return value -+ // and return. -+ // r3: argc -+ // r6: JSArray -+ // sp[0]: receiver -+ __ addi(sp, sp, Operand(kPointerSize)); -+ __ mr(r3, r6); -+ __ blr(); -+ -+ __ bind(&has_non_smi_element); -+ // Double values are handled by the runtime. -+ __ CheckMap( -+ r5, r22, Heap::kHeapNumberMapRootIndex, &not_double, DONT_DO_SMI_CHECK); -+ __ bind(&cant_transition_map); -+ __ UndoAllocationInNewSpace(r6, r7); -+ __ b(call_generic_code); -+ -+ __ bind(&not_double); -+ // Transition FAST_SMI_ELEMENTS to FAST_ELEMENTS. -+ // r6: JSArray -+ __ LoadP(r5, FieldMemOperand(r6, HeapObject::kMapOffset)); -+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, -+ FAST_ELEMENTS, -+ r5, -+ r22, -+ &cant_transition_map); -+ __ StoreP(r5, FieldMemOperand(r6, HeapObject::kMapOffset), r0); -+ __ RecordWriteField(r6, -+ HeapObject::kMapOffset, -+ r5, -+ r22, -+ kLRHasNotBeenSaved, -+ kDontSaveFPRegs, -+ EMIT_REMEMBERED_SET, -+ OMIT_SMI_CHECK); -+ Label loop2; -+ __ subi(r10, r10, Operand(kPointerSize)); -+ __ bind(&loop2); -+ __ LoadP(r5, MemOperand(r10)); -+ __ addi(r10, r10, Operand(kPointerSize)); -+ __ StorePU(r5, MemOperand(r8, -kPointerSize)); -+ __ cmp(r7, r8); -+ __ blt(&loop2); -+ __ b(&finish); -+} -+ -+ -+void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) { -+ // ----------- S t a t e ------------- -+ // -- r3 : number of arguments -+ // -- lr : return address -+ // -- sp[...]: constructor arguments -+ // ----------------------------------- -+ Label generic_array_code, one_or_more_arguments, two_or_more_arguments; -+ -+ // Get the InternalArray function. 
-+ GenerateLoadInternalArrayFunction(masm, r4); -+ -+ if (FLAG_debug_code) { -+ // Initial map for the builtin InternalArray functions should be maps. -+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset)); -+ STATIC_ASSERT(kSmiTagMask < 0x8000); -+ __ andi(r0, r5, Operand(kSmiTagMask)); -+ __ Assert(ne, "Unexpected initial map for InternalArray function", cr0); -+ __ CompareObjectType(r5, r6, r7, MAP_TYPE); -+ __ Assert(eq, "Unexpected initial map for InternalArray function"); -+ } -+ -+ // Run the native code for the InternalArray function called as a normal -+ // function. -+ ArrayNativeCode(masm, &generic_array_code); -+ -+ // Jump to the generic array code if the specialized code cannot handle the -+ // construction. -+ __ bind(&generic_array_code); -+ -+ Handle<Code> array_code = -+ masm->isolate()->builtins()->InternalArrayCodeGeneric(); -+ __ Jump(array_code, RelocInfo::CODE_TARGET); -+} -+ -+ -+void Builtins::Generate_ArrayCode(MacroAssembler* masm) { -+ // ----------- S t a t e ------------- -+ // -- r3 : number of arguments -+ // -- lr : return address -+ // -- sp[...]: constructor arguments -+ // ----------------------------------- -+ Label generic_array_code, one_or_more_arguments, two_or_more_arguments; -+ -+ // Get the Array function. -+ GenerateLoadArrayFunction(masm, r4); -+ -+ if (FLAG_debug_code) { -+ // Initial map for the builtin Array functions should be maps. -+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset)); -+ STATIC_ASSERT(kSmiTagMask < 0x8000); -+ __ andi(r0, r5, Operand(kSmiTagMask)); -+ __ Assert(ne, "Unexpected initial map for Array function", cr0); -+ __ CompareObjectType(r5, r6, r7, MAP_TYPE); -+ __ Assert(eq, "Unexpected initial map for Array function"); -+ } -+ -+ // Run the native code for the Array function called as a normal function. -+ ArrayNativeCode(masm, &generic_array_code); -+ -+ // Jump to the generic array code if the specialized code cannot handle -+ // the construction. -+ __ bind(&generic_array_code); -+ -+ Handle<Code> array_code = -+ masm->isolate()->builtins()->ArrayCodeGeneric(); -+ __ Jump(array_code, RelocInfo::CODE_TARGET); -+} -+ -+ -+void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) { -+ // ----------- S t a t e ------------- -+ // -- r3 : number of arguments -+ // -- r4 : constructor function -+ // -- lr : return address -+ // -- sp[...]: constructor arguments -+ // ----------------------------------- -+ Label generic_constructor; -+ -+ if (FLAG_debug_code) { -+ // The array construct code is only set for the builtin and internal -+ // Array functions which always have a map. -+ // Initial map for the builtin Array function should be a map. -+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset)); -+ __ andi(r0, r5, Operand(kSmiTagMask)); -+ __ Assert(ne, "Unexpected initial map for Array function", cr0); -+ __ CompareObjectType(r5, r6, r7, MAP_TYPE); -+ __ Assert(eq, "Unexpected initial map for Array function"); -+ } -+ -+ // Run the native code for the Array function called as a constructor. -+ ArrayNativeCode(masm, &generic_constructor); -+ -+ // Jump to the generic construct code in case the specialized code cannot -+ // handle the construction. 
-+ __ bind(&generic_constructor); -+ Handle<Code> generic_construct_stub = -+ masm->isolate()->builtins()->JSConstructStubGeneric(); -+ __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); -+} -+ -+ -+void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { -+ // ----------- S t a t e ------------- -+ // -- r3 : number of arguments -+ // -- r4 : constructor function -+ // -- lr : return address -+ // -- sp[(argc - n - 1) * 4] : arg[n] (zero based) -+ // -- sp[argc * 4] : receiver -+ // ----------------------------------- -+ Counters* counters = masm->isolate()->counters(); -+ __ IncrementCounter(counters->string_ctor_calls(), 1, r5, r6); -+ -+ Register function = r4; -+ if (FLAG_debug_code) { -+ __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, r5); -+ __ cmp(function, r5); -+ __ Assert(eq, "Unexpected String function"); -+ } -+ -+ // Load the first argument into r3 and get rid of the rest. -+ Label no_arguments; -+ __ cmpi(r3, Operand(0, RelocInfo::NONE)); -+ __ beq(&no_arguments); -+ // First arg = sp[(argc - 1) * 4]. -+ __ subi(r3, r3, Operand(1)); -+ __ ShiftLeftImm(r3, r3, Operand(kPointerSizeLog2)); -+ __ add(sp, sp, r3); -+ __ LoadP(r3, MemOperand(sp)); -+ // sp now points to args[0], drop args[0] + receiver. -+ __ Drop(2); -+ -+ Register argument = r5; -+ Label not_cached, argument_is_string; -+ NumberToStringStub::GenerateLookupNumberStringCache( -+ masm, -+ r3, // Input. -+ argument, // Result. -+ r6, // Scratch. -+ r7, // Scratch. -+ r8, // Scratch. -+ false, // Is it a Smi? -+ &not_cached); -+ __ IncrementCounter(counters->string_ctor_cached_number(), 1, r6, r7); -+ __ bind(&argument_is_string); -+ -+ // ----------- S t a t e ------------- -+ // -- r5 : argument converted to string -+ // -- r4 : constructor function -+ // -- lr : return address -+ // ----------------------------------- -+ -+ Label gc_required; -+ __ AllocateInNewSpace(JSValue::kSize, -+ r3, // Result. -+ r6, // Scratch. -+ r7, // Scratch. -+ &gc_required, -+ TAG_OBJECT); -+ -+ // Initialising the String Object. -+ Register map = r6; -+ __ LoadGlobalFunctionInitialMap(function, map, r7); -+ if (FLAG_debug_code) { -+ __ lbz(r7, FieldMemOperand(map, Map::kInstanceSizeOffset)); -+ __ cmpi(r7, Operand(JSValue::kSize >> kPointerSizeLog2)); -+ __ Assert(eq, "Unexpected string wrapper instance size"); -+ __ lbz(r7, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset)); -+ __ cmpi(r7, Operand(0, RelocInfo::NONE)); -+ __ Assert(eq, "Unexpected unused properties of string wrapper"); -+ } -+ __ StoreP(map, FieldMemOperand(r3, HeapObject::kMapOffset), r0); -+ -+ __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex); -+ __ StoreP(r6, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0); -+ __ StoreP(r6, FieldMemOperand(r3, JSObject::kElementsOffset), r0); -+ -+ __ StoreP(argument, FieldMemOperand(r3, JSValue::kValueOffset), r0); -+ -+ // Ensure the object is fully initialized. -+ STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize); -+ -+ __ Ret(); -+ -+ // The argument was not found in the number to string cache. Check -+ // if it's a string already before calling the conversion builtin. -+ Label convert_argument; -+ __ bind(&not_cached); -+ __ JumpIfSmi(r3, &convert_argument); -+ -+ // Is it a String? 
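-+  // String instance types have the kIsNotStringMask bit clear, so a non-zero
-+  // AND result below means the argument is not yet a string.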
-+ __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset)); -+ __ lbz(r6, FieldMemOperand(r5, Map::kInstanceTypeOffset)); -+ STATIC_ASSERT(kNotStringTag != 0); -+ __ andi(r0, r6, Operand(kIsNotStringMask)); -+ __ bne(&convert_argument, cr0); -+ __ mr(argument, r3); -+ __ IncrementCounter(counters->string_ctor_conversions(), 1, r6, r7); -+ __ b(&argument_is_string); -+ -+ // Invoke the conversion builtin and put the result into r5. -+ __ bind(&convert_argument); -+ __ push(function); // Preserve the function. -+ __ IncrementCounter(counters->string_ctor_conversions(), 1, r6, r7); -+ { -+ FrameScope scope(masm, StackFrame::INTERNAL); -+ __ push(r3); -+ __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION); -+ } -+ __ pop(function); -+ __ mr(argument, r3); -+ __ b(&argument_is_string); -+ -+ // Load the empty string into r5, remove the receiver from the -+ // stack, and jump back to the case where the argument is a string. -+ __ bind(&no_arguments); -+ __ LoadRoot(argument, Heap::kEmptyStringRootIndex); -+ __ Drop(1); -+ __ b(&argument_is_string); -+ -+ // At this point the argument is already a string. Call runtime to -+ // create a string wrapper. -+ __ bind(&gc_required); -+ __ IncrementCounter(counters->string_ctor_gc_required(), 1, r6, r7); -+ { -+ FrameScope scope(masm, StackFrame::INTERNAL); -+ __ push(argument); -+ __ CallRuntime(Runtime::kNewStringWrapper, 1); -+ } -+ __ Ret(); -+} -+ -+ -+static void GenerateTailCallToSharedCode(MacroAssembler* masm) { -+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset)); -+ __ LoadP(r5, FieldMemOperand(r5, SharedFunctionInfo::kCodeOffset)); -+ __ addi(r5, r5, Operand(Code::kHeaderSize - kHeapObjectTag)); -+ __ mtctr(r5); -+ __ bcr(); -+} -+ -+ -+void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) { -+ GenerateTailCallToSharedCode(masm); -+} -+ -+ -+void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) { -+ { -+ FrameScope scope(masm, StackFrame::INTERNAL); -+ -+ // Push a copy of the function onto the stack. -+ __ push(r4); -+ // Push call kind information. -+ __ push(r8); -+ -+ __ push(r4); // Function is also the parameter to the runtime call. -+ __ CallRuntime(Runtime::kParallelRecompile, 1); -+ -+ // Restore call kind information. -+ __ pop(r8); -+ // Restore receiver. -+ __ pop(r4); -+ -+ // Tear down internal frame. -+ } -+ -+ GenerateTailCallToSharedCode(masm); -+} -+ -+ -+static void Generate_JSConstructStubHelper(MacroAssembler* masm, -+ bool is_api_function, -+ bool count_constructions) { -+ // ----------- S t a t e ------------- -+ // -- r3 : number of arguments -+ // -- r4 : constructor function -+ // -- lr : return address -+ // -- sp[...]: constructor arguments -+ // ----------------------------------- -+ -+ // Should never count constructions for api objects. -+ ASSERT(!is_api_function || !count_constructions); -+ -+ Isolate* isolate = masm->isolate(); -+ -+ // Enter a construct frame. -+ { -+ FrameScope scope(masm, StackFrame::CONSTRUCT); -+ -+ // Preserve the two incoming parameters on the stack. -+ __ SmiTag(r3); -+ __ push(r3); // Smi-tagged arguments count. -+ __ push(r4); // Constructor function. -+ -+ // Try to allocate the object without transitioning into C code. If any of -+ // the preconditions is not met, the code bails out to the runtime call. 
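-+    // The inline path verifies the initial map, optionally runs the
-+    // construction countdown, then allocates and initializes the object here.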
-+ Label rt_call, allocated; -+ if (FLAG_inline_new) { -+ Label undo_allocation; -+#ifdef ENABLE_DEBUGGER_SUPPORT -+ ExternalReference debug_step_in_fp = -+ ExternalReference::debug_step_in_fp_address(isolate); -+ __ mov(r5, Operand(debug_step_in_fp)); -+ __ LoadP(r5, MemOperand(r5)); -+ __ cmpi(r5, Operand::Zero()); -+ __ bne(&rt_call); -+#endif -+ -+ // Load the initial map and verify that it is in fact a map. -+ // r4: constructor function -+ __ LoadP(r5, FieldMemOperand(r4, -+ JSFunction::kPrototypeOrInitialMapOffset)); -+ __ JumpIfSmi(r5, &rt_call); -+ __ CompareObjectType(r5, r6, r7, MAP_TYPE); -+ __ bne(&rt_call); -+ -+ // Check that the constructor is not constructing a JSFunction (see -+ // comments in Runtime_NewObject in runtime.cc). In which case the -+ // initial map's instance type would be JS_FUNCTION_TYPE. -+ // r4: constructor function -+ // r5: initial map -+ __ CompareInstanceType(r5, r6, JS_FUNCTION_TYPE); -+ __ beq(&rt_call); -+ -+ if (count_constructions) { -+ Label allocate; -+ // Decrease generous allocation count. -+ __ LoadP(r6, FieldMemOperand(r4, -+ JSFunction::kSharedFunctionInfoOffset)); -+ MemOperand constructor_count = -+ FieldMemOperand(r6, SharedFunctionInfo::kConstructionCountOffset); -+ __ lbz(r7, constructor_count); -+ __ addi(r7, r7, Operand(-1)); -+ __ stb(r7, constructor_count); -+ __ cmpi(r7, Operand::Zero()); -+ __ bne(&allocate); -+ -+ __ push(r4); -+ __ push(r5); -+ -+ __ push(r4); // constructor -+ // The call will replace the stub, so the countdown is only done once. -+ __ CallRuntime(Runtime::kFinalizeInstanceSize, 1); -+ -+ __ pop(r5); -+ __ pop(r4); -+ -+ __ bind(&allocate); -+ } -+ -+ // Now allocate the JSObject on the heap. -+ // r4: constructor function -+ // r5: initial map -+ __ lbz(r6, FieldMemOperand(r5, Map::kInstanceSizeOffset)); -+ __ AllocateInNewSpace(r6, r7, r8, r9, &rt_call, SIZE_IN_WORDS); -+ -+ // Allocated the JSObject, now initialize the fields. Map is set to -+ // initial map and properties and elements are set to empty fixed array. -+ // r4: constructor function -+ // r5: initial map -+ // r6: object size -+ // r7: JSObject (not tagged) -+ __ LoadRoot(r9, Heap::kEmptyFixedArrayRootIndex); -+ __ mr(r8, r7); -+ ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); -+ __ StoreP(r5, MemOperand(r8)); -+ ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset); -+ __ StorePU(r9, MemOperand(r8, kPointerSize)); -+ ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset); -+ __ StorePU(r9, MemOperand(r8, kPointerSize)); -+ __ addi(r8, r8, Operand(kPointerSize)); -+ -+ // Fill all the in-object properties with the appropriate filler. -+ // r4: constructor function -+ // r5: initial map -+ // r6: object size (in words) -+ // r7: JSObject (not tagged) -+ // r8: First in-object property of JSObject (not tagged) -+ uint32_t byte; -+ __ ShiftLeftImm(r9, r6, Operand(kPointerSizeLog2)); -+ __ add(r9, r7, r9); // End of object. 
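-+      // Fields from r8 up to r9 are filled below with undefined (and, when
-+      // counting constructions, with the one-pointer filler past the
-+      // pre-allocated slots).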
-+ ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize); -+ __ LoadRoot(r10, Heap::kUndefinedValueRootIndex); -+ if (count_constructions) { -+ __ lwz(r3, FieldMemOperand(r5, Map::kInstanceSizesOffset)); -+ // Fetch Map::kPreAllocatedPropertyFieldsByte field from r3 -+ // and multiply by kPointerSizeLog2 -+ STATIC_ASSERT(Map::kPreAllocatedPropertyFieldsByte < 4); -+ byte = Map::kPreAllocatedPropertyFieldsByte; -+#if __BYTE_ORDER == __BIG_ENDIAN -+ byte = 3 - byte; -+#endif -+ __ ExtractBitRange(r3, r3, -+ ((byte + 1) * kBitsPerByte) - 1, -+ byte * kBitsPerByte); -+ __ ShiftLeftImm(r3, r3, Operand(kPointerSizeLog2)); -+ __ add(r3, r8, r3); -+ // r3: offset of first field after pre-allocated fields -+ if (FLAG_debug_code) { -+ __ cmp(r3, r9); -+ __ Assert(le, "Unexpected number of pre-allocated property fields."); -+ } -+ __ InitializeFieldsWithFiller(r8, r3, r10); -+ // To allow for truncation. -+ __ LoadRoot(r10, Heap::kOnePointerFillerMapRootIndex); -+ } -+ __ InitializeFieldsWithFiller(r8, r9, r10); -+ -+ // Add the object tag to make the JSObject real, so that we can continue -+ // and jump into the continuation code at any time from now on. Any -+ // failures need to undo the allocation, so that the heap is in a -+ // consistent state and verifiable. -+ __ addi(r7, r7, Operand(kHeapObjectTag)); -+ -+ // Check if a non-empty properties array is needed. Continue with the -+ // allocated object if not; fall through to the runtime call if it is. -+ // r4: constructor function -+ // r7: JSObject -+ // r8: start of next object (not tagged) -+ __ lbz(r6, FieldMemOperand(r5, Map::kUnusedPropertyFieldsOffset)); -+ // The field instance sizes contains both pre-allocated property fields -+ // and in-object properties. -+ __ lwz(r3, FieldMemOperand(r5, Map::kInstanceSizesOffset)); -+ // Fetch Map::kPreAllocatedPropertyFieldsByte field from r3 -+ STATIC_ASSERT(Map::kPreAllocatedPropertyFieldsByte < 4); -+ byte = Map::kPreAllocatedPropertyFieldsByte; -+#if __BYTE_ORDER == __BIG_ENDIAN -+ byte = 3 - byte; -+#endif -+ __ ExtractBitRange(r9, r3, -+ ((byte + 1) * kBitsPerByte) - 1, -+ byte * kBitsPerByte); -+ __ add(r6, r6, r9); -+ STATIC_ASSERT(Map::kInObjectPropertiesByte < 4); -+ byte = Map::kInObjectPropertiesByte; -+#if __BYTE_ORDER == __BIG_ENDIAN -+ byte = 3 - byte; -+#endif -+ __ ExtractBitRange(r9, r3, -+ ((byte + 1) * kBitsPerByte) - 1, -+ byte * kBitsPerByte); -+ __ sub(r6, r6, r9); // roohack - sub order may be incorrect -+ __ cmpi(r6, Operand::Zero()); -+ -+ // Done if no extra properties are to be allocated. -+ __ beq(&allocated); -+ __ Assert(ge, "Property allocation count failed."); -+ -+ // Scale the number of elements by pointer size and add the header for -+ // FixedArrays to the start of the next object calculation from above. -+ // r4: constructor -+ // r6: number of elements in properties array -+ // r7: JSObject -+ // r8: start of next object -+ __ addi(r3, r6, Operand(FixedArray::kHeaderSize / kPointerSize)); -+ __ AllocateInNewSpace( -+ r3, -+ r8, -+ r9, -+ r5, -+ &undo_allocation, -+ static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS)); -+ -+ // Initialize the FixedArray. 
-+ // r4: constructor -+ // r6: number of elements in properties array -+ // r7: JSObject -+ // r8: FixedArray (not tagged) -+ __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex); -+ __ mr(r5, r8); -+ ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); -+ __ StoreP(r9, MemOperand(r5)); -+ ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset); -+ __ SmiTag(r3, r6); -+ __ StorePU(r3, MemOperand(r5, kPointerSize)); -+ __ addi(r5, r5, Operand(kPointerSize)); -+ -+ // Initialize the fields to undefined. -+ // r4: constructor function -+ // r5: First element of FixedArray (not tagged) -+ // r6: number of elements in properties array -+ // r7: JSObject -+ // r8: FixedArray (not tagged) -+ __ ShiftLeftImm(r9, r6, Operand(kPointerSizeLog2)); -+ __ add(r9, r5, r9); // End of object. -+ ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize); -+ { Label loop, entry; -+ if (count_constructions) { -+ __ LoadRoot(r10, Heap::kUndefinedValueRootIndex); -+ } else if (FLAG_debug_code) { -+ __ LoadRoot(r11, Heap::kUndefinedValueRootIndex); -+ __ cmp(r10, r11); -+ __ Assert(eq, "Undefined value not loaded."); -+ } -+ __ b(&entry); -+ __ bind(&loop); -+ __ StoreP(r10, MemOperand(r5)); -+ __ addi(r5, r5, Operand(kPointerSize)); -+ __ bind(&entry); -+ __ cmp(r5, r9); -+ __ blt(&loop); -+ } -+ -+ // Store the initialized FixedArray into the properties field of -+ // the JSObject -+ // r4: constructor function -+ // r7: JSObject -+ // r8: FixedArray (not tagged) -+ __ addi(r8, r8, Operand(kHeapObjectTag)); // Add the heap tag. -+ __ StoreP(r8, FieldMemOperand(r7, JSObject::kPropertiesOffset), r0); -+ -+ // Continue with JSObject being successfully allocated -+ // r4: constructor function -+ // r7: JSObject -+ __ b(&allocated); -+ -+ // Undo the setting of the new top so that the heap is verifiable. For -+ // example, the map's unused properties potentially do not match the -+ // allocated objects unused properties. -+ // r7: JSObject (previous new top) -+ __ bind(&undo_allocation); -+ __ UndoAllocationInNewSpace(r7, r8); -+ } -+ -+ // Allocate the new receiver object using the runtime call. -+ // r4: constructor function -+ __ bind(&rt_call); -+ __ push(r4); // argument for Runtime_NewObject -+ __ CallRuntime(Runtime::kNewObject, 1); -+ __ mr(r7, r3); -+ -+ // Receiver for constructor call allocated. -+ // r7: JSObject -+ __ bind(&allocated); -+ __ push(r7); -+ __ push(r7); -+ -+ // Reload the number of arguments and the constructor from the stack. -+ // sp[0]: receiver -+ // sp[1]: receiver -+ // sp[2]: constructor function -+ // sp[3]: number of arguments (smi-tagged) -+ __ LoadP(r4, MemOperand(sp, 2 * kPointerSize)); -+ __ LoadP(r6, MemOperand(sp, 3 * kPointerSize)); -+ -+ // Set up pointer to last argument. -+ __ addi(r5, fp, Operand(StandardFrameConstants::kCallerSPOffset)); -+ -+ // Set up number of arguments for function call below -+ __ SmiUntag(r3, r6); -+ -+ // Copy arguments and receiver to the expression stack. -+ // r3: number of arguments -+ // r4: constructor function -+ // r5: address of last argument (caller sp) -+ // r6: number of arguments (smi-tagged) -+ // sp[0]: receiver -+ // sp[1]: receiver -+ // sp[2]: constructor function -+ // sp[3]: number of arguments (smi-tagged) -+ Label loop, no_args; -+ __ cmpi(r3, Operand::Zero()); -+ __ beq(&no_args); -+ __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2)); -+ __ mtctr(r3); -+ __ bind(&loop); -+ __ subi(ip, ip, Operand(kPointerSize)); -+ __ LoadPX(r0, MemOperand(r5, ip)); -+ __ push(r0); -+ __ bdnz(&loop); -+ __ bind(&no_args); -+ -+ // Call the function. 
-+ // r3: number of arguments -+ // r4: constructor function -+ if (is_api_function) { -+ __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset)); -+ Handle<Code> code = -+ masm->isolate()->builtins()->HandleApiCallConstruct(); -+ ParameterCount expected(0); -+ __ InvokeCode(code, expected, expected, -+ RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD); -+ } else { -+ ParameterCount actual(r3); -+ __ InvokeFunction(r4, actual, CALL_FUNCTION, // roohack -+ NullCallWrapper(), CALL_AS_METHOD); -+ } -+ -+ // Store offset of return address for deoptimizer. -+ if (!is_api_function && !count_constructions) { -+ masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset()); -+ } -+ -+ // Restore context from the frame. -+ // r3: result -+ // sp[0]: receiver -+ // sp[1]: constructor function -+ // sp[2]: number of arguments (smi-tagged) -+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); -+ -+ // If the result is an object (in the ECMA sense), we should get rid -+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7 -+ // on page 74. -+ Label use_receiver, exit; -+ -+ // If the result is a smi, it is *not* an object in the ECMA sense. -+ // r3: result -+ // sp[0]: receiver (newly allocated object) -+ // sp[1]: constructor function -+ // sp[2]: number of arguments (smi-tagged) -+ __ JumpIfSmi(r3, &use_receiver); -+ -+ // If the type of the result (stored in its map) is less than -+ // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense. -+ __ CompareObjectType(r3, r6, r6, FIRST_SPEC_OBJECT_TYPE); -+ __ bge(&exit); -+ -+ // Throw away the result of the constructor invocation and use the -+ // on-stack receiver as the result. -+ __ bind(&use_receiver); -+ __ LoadP(r3, MemOperand(sp)); -+ -+ // Remove receiver from the stack, remove caller arguments, and -+ // return. -+ __ bind(&exit); -+ // r3: result -+ // sp[0]: receiver (newly allocated object) -+ // sp[1]: constructor function -+ // sp[2]: number of arguments (smi-tagged) -+ __ LoadP(r4, MemOperand(sp, 2 * kPointerSize)); -+ -+ // Leave construct frame. -+ } -+ -+ __ SmiToPtrArrayOffset(r4, r4); -+ __ add(sp, sp, r4); -+ __ addi(sp, sp, Operand(kPointerSize)); -+ __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r4, r5); -+ __ blr(); -+} -+ -+ -+void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) { -+ Generate_JSConstructStubHelper(masm, false, true); -+} -+ -+ -+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { -+ Generate_JSConstructStubHelper(masm, false, false); -+} -+ -+ -+void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) { -+ Generate_JSConstructStubHelper(masm, true, false); -+} -+ -+ -+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, -+ bool is_construct) { -+ // Called from Generate_JS_Entry -+ // r3: code entry -+ // r4: function -+ // r5: receiver -+ // r6: argc -+ // r7: argv -+ // r0,r8-r9, cp may be clobbered -+ -+ // Clear the context before we push it when entering the internal frame. -+ __ li(cp, Operand(0, RelocInfo::NONE)); -+ -+ // Enter an internal frame. -+ { -+ FrameScope scope(masm, StackFrame::INTERNAL); -+ -+ // Set up the context from the function argument. -+ __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset)); -+ -+ __ InitializeRootRegister(); -+ -+ // Push the function and the receiver onto the stack. -+ __ push(r4); -+ __ push(r5); -+ -+ // Copy arguments to the stack in a loop. -+ // r4: function -+ // r6: argc -+ // r7: argv, i.e. 
points to first arg -+ Label loop, entry; -+ __ ShiftLeftImm(r0, r6, Operand(kPointerSizeLog2)); -+ __ add(r5, r7, r0); -+ // r5 points past last arg. -+ __ b(&entry); -+ __ bind(&loop); -+ __ LoadP(r8, MemOperand(r7)); // read next parameter -+ __ addi(r7, r7, Operand(kPointerSize)); -+ __ LoadP(r0, MemOperand(r8)); // dereference handle -+ __ push(r0); // push parameter -+ __ bind(&entry); -+ __ cmp(r7, r5); -+ __ bne(&loop); -+ -+ // Initialize all JavaScript callee-saved registers, since they will be seen -+ // by the garbage collector as part of handlers. -+ __ LoadRoot(r7, Heap::kUndefinedValueRootIndex); -+ __ mr(r14, r7); -+ __ mr(r15, r7); -+ __ mr(r16, r7); -+ __ mr(r22, r7); // hmmm, possibly should be reassigned to r17 -+ -+ // Invoke the code and pass argc as r3. -+ __ mr(r3, r6); -+ if (is_construct) { -+ CallConstructStub stub(NO_CALL_FUNCTION_FLAGS); -+ __ CallStub(&stub); -+ } else { -+ ParameterCount actual(r3); -+ __ InvokeFunction(r4, actual, CALL_FUNCTION, -+ NullCallWrapper(), CALL_AS_METHOD); -+ } -+ // Exit the JS frame and remove the parameters (except function), and -+ // return. -+ } -+ __ blr(); -+ -+ // r3: result -+} -+ -+ -+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) { -+ Generate_JSEntryTrampolineHelper(masm, false); -+} -+ -+ -+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) { -+ Generate_JSEntryTrampolineHelper(masm, true); -+} -+ -+ -+void Builtins::Generate_LazyCompile(MacroAssembler* masm) { -+ // Enter an internal frame. -+ { -+ FrameScope scope(masm, StackFrame::INTERNAL); -+ -+ // Preserve the function. -+ __ push(r4); -+ // Push call kind information. -+ __ push(r8); -+ -+ // Push the function on the stack as the argument to the runtime function. -+ __ push(r4); -+ __ CallRuntime(Runtime::kLazyCompile, 1); -+ // Calculate the entry point. -+ __ addi(r5, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); -+ -+ // Restore call kind information. -+ __ pop(r8); -+ // Restore saved function. -+ __ pop(r4); -+ -+ // Tear down internal frame. -+ } -+ -+ // Do a tail-call of the compiled function. -+ __ Jump(r5); -+} -+ -+ -+void Builtins::Generate_LazyRecompile(MacroAssembler* masm) { -+ // Enter an internal frame. -+ { -+ FrameScope scope(masm, StackFrame::INTERNAL); -+ -+ // Preserve the function. -+ __ push(r4); -+ // Push call kind information. -+ __ push(r8); -+ -+ // Push the function on the stack as the argument to the runtime function. -+ __ push(r4); -+ __ CallRuntime(Runtime::kLazyRecompile, 1); -+ // Calculate the entry point. -+ __ addi(r5, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); -+ -+ // Restore call kind information. -+ __ pop(r8); -+ // Restore saved function. -+ __ pop(r4); -+ -+ // Tear down internal frame. -+ } -+ -+ // Do a tail-call of the compiled function. -+ __ Jump(r5); -+} -+ -+ -+static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm, -+ Deoptimizer::BailoutType type) { -+ { -+ FrameScope scope(masm, StackFrame::INTERNAL); -+ // Pass the function and deoptimization type to the runtime system. -+ __ LoadSmiLiteral(r3, Smi::FromInt(static_cast<int>(type))); -+ __ push(r3); -+ __ CallRuntime(Runtime::kNotifyDeoptimized, 1); -+ } -+ -+ // Get the full codegen state from the stack and untag it -> r9. -+ __ LoadP(r9, MemOperand(sp, 0 * kPointerSize)); -+ __ SmiUntag(r9); -+ // Switch on the state. 
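-+  // NO_REGISTERS means no live value; TOS_REG means the next stack slot
-+  // holds a live value that is reloaded into r3 before returning.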
-+ Label with_tos_register, unknown_state; -+ __ cmpi(r9, Operand(FullCodeGenerator::NO_REGISTERS)); -+ __ bne(&with_tos_register); -+ __ addi(sp, sp, Operand(1 * kPointerSize)); // Remove state. -+ __ Ret(); -+ -+ __ bind(&with_tos_register); -+ __ LoadP(r3, MemOperand(sp, 1 * kPointerSize)); -+ __ cmpi(r9, Operand(FullCodeGenerator::TOS_REG)); -+ __ bne(&unknown_state); -+ __ addi(sp, sp, Operand(2 * kPointerSize)); // Remove state. -+ __ Ret(); -+ -+ __ bind(&unknown_state); -+ __ stop("no cases left"); -+} -+ -+ -+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { -+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER); -+} -+ -+ -+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) { -+ Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY); -+} -+ -+ -+void Builtins::Generate_NotifyOSR(MacroAssembler* masm) { -+ // For now, we are relying on the fact that Runtime::NotifyOSR -+ // doesn't do any garbage collection which allows us to save/restore -+ // the registers without worrying about which of them contain -+ // pointers. This seems a bit fragile. -+ __ mflr(r0); -+ RegList saved_regs = -+ (kJSCallerSaved | kCalleeSaved | r0.bit() | fp.bit()) & ~sp.bit(); -+ __ MultiPush(saved_regs); -+ { -+ FrameScope scope(masm, StackFrame::INTERNAL); -+ __ CallRuntime(Runtime::kNotifyOSR, 0); -+ } -+ __ MultiPop(saved_regs); -+ __ mtlr(r0); -+ __ Ret(); -+} -+ -+ -+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { -+ // Lookup the function in the JavaScript frame and push it as an -+ // argument to the on-stack replacement function. -+ __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); -+ { -+ FrameScope scope(masm, StackFrame::INTERNAL); -+ __ push(r3); -+ __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1); -+ } -+ -+ // If the result was -1 it means that we couldn't optimize the -+ // function. Just return and continue in the unoptimized version. -+ Label skip; -+ __ CmpSmiLiteral(r3, Smi::FromInt(-1), r0); -+ __ bne(&skip); -+ __ Ret(); -+ -+ __ bind(&skip); -+ // Untag the AST id and push it on the stack. -+ __ SmiUntag(r3); -+ __ push(r3); -+ -+ // Generate the code for doing the frame-to-frame translation using -+ // the deoptimizer infrastructure. -+ Deoptimizer::EntryGenerator generator(masm, Deoptimizer::OSR); -+ generator.Generate(); -+} -+ -+ -+void Builtins::Generate_FunctionCall(MacroAssembler* masm) { -+ // 1. Make sure we have at least one argument. -+ // r3: actual number of arguments -+ { Label done; -+ __ cmpi(r3, Operand::Zero()); -+ __ bne(&done); -+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex); -+ __ push(r5); -+ __ addi(r3, r3, Operand(1)); -+ __ bind(&done); -+ } -+ -+ // 2. Get the function to call (passed as receiver) from the stack, check -+ // if it is a function. -+ // r3: actual number of arguments -+ Label slow, non_function; -+ __ ShiftLeftImm(r4, r3, Operand(kPointerSizeLog2)); -+ __ add(r4, sp, r4); -+ __ LoadP(r4, MemOperand(r4)); -+ __ JumpIfSmi(r4, &non_function); -+ __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE); -+ __ bne(&slow); -+ -+ // 3a. Patch the first argument if necessary when calling a function. -+ // r3: actual number of arguments -+ // r4: function -+ Label shift_arguments; -+ __ li(r7, Operand(0, RelocInfo::NONE)); // indicate regular JS_FUNCTION -+ { Label convert_to_object, use_global_receiver, patch_receiver; -+ // Change context eagerly in case we need the global receiver. 
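-+    // On 32-bit targets the compiler-hint bits sit above the smi tag, hence
-+    // the kSmiTagSize adjustment in the TestBit calls below.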
-+ __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset)); -+ -+ // Do not transform the receiver for strict mode functions. -+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset)); -+ __ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kCompilerHintsOffset)); -+ __ TestBit(r6, -+#if V8_TARGET_ARCH_PPC64 -+ SharedFunctionInfo::kStrictModeFunction, -+#else -+ SharedFunctionInfo::kStrictModeFunction + kSmiTagSize, -+#endif -+ r0); -+ __ bne(&shift_arguments, cr0); -+ -+ // Do not transform the receiver for native (Compilerhints already in r6). -+ __ TestBit(r6, -+#if V8_TARGET_ARCH_PPC64 -+ SharedFunctionInfo::kNative, -+#else -+ SharedFunctionInfo::kNative + kSmiTagSize, -+#endif -+ r0); -+ __ bne(&shift_arguments, cr0); -+ -+ // Compute the receiver in non-strict mode. -+ __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2)); -+ __ add(r5, sp, ip); -+ __ LoadP(r5, MemOperand(r5, -kPointerSize)); -+ // r3: actual number of arguments -+ // r4: function -+ // r5: first argument -+ __ JumpIfSmi(r5, &convert_to_object); -+ -+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex); -+ __ cmp(r5, r6); -+ __ beq(&use_global_receiver); -+ __ LoadRoot(r6, Heap::kNullValueRootIndex); -+ __ cmp(r5, r6); -+ __ beq(&use_global_receiver); -+ -+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); -+ __ CompareObjectType(r5, r6, r6, FIRST_SPEC_OBJECT_TYPE); -+ __ bge(&shift_arguments); -+ -+ __ bind(&convert_to_object); -+ -+ { -+ // Enter an internal frame in order to preserve argument count. -+ FrameScope scope(masm, StackFrame::INTERNAL); -+ __ SmiTag(r3); -+ __ push(r3); -+ -+ __ push(r5); -+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); -+ __ mr(r5, r3); -+ -+ __ pop(r3); -+ __ SmiUntag(r3); -+ -+ // Exit the internal frame. -+ } -+ -+ // Restore the function to r4, and the flag to r7. -+ __ ShiftLeftImm(r7, r3, Operand(kPointerSizeLog2)); -+ __ add(r7, sp, r7); -+ __ LoadP(r4, MemOperand(r7)); -+ __ li(r7, Operand(0, RelocInfo::NONE)); -+ __ b(&patch_receiver); -+ -+ // Use the global receiver object from the called function as the -+ // receiver. -+ __ bind(&use_global_receiver); -+ const int kGlobalIndex = -+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize; -+ __ LoadP(r5, FieldMemOperand(cp, kGlobalIndex)); -+ __ LoadP(r5, FieldMemOperand(r5, GlobalObject::kNativeContextOffset)); -+ __ LoadP(r5, FieldMemOperand(r5, kGlobalIndex)); -+ __ LoadP(r5, FieldMemOperand(r5, GlobalObject::kGlobalReceiverOffset)); -+ -+ __ bind(&patch_receiver); -+ __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2)); -+ __ add(r6, sp, ip); -+ __ StoreP(r5, MemOperand(r6, -kPointerSize)); -+ -+ __ b(&shift_arguments); -+ } -+ -+ // 3b. Check for function proxy. -+ __ bind(&slow); -+ __ li(r7, Operand(1, RelocInfo::NONE)); // indicate function proxy -+ __ cmpi(r5, Operand(JS_FUNCTION_PROXY_TYPE)); -+ __ beq(&shift_arguments); -+ __ bind(&non_function); -+ __ li(r7, Operand(2, RelocInfo::NONE)); // indicate non-function -+ -+ // 3c. Patch the first argument when calling a non-function. The -+ // CALL_NON_FUNCTION builtin expects the non-function callee as -+ // receiver, so overwrite the first argument which will ultimately -+ // become the receiver. -+ // r3: actual number of arguments -+ // r4: function -+ // r7: call type (0: JS function, 1: function proxy, 2: non-function) -+ __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2)); -+ __ add(r5, sp, ip); -+ __ StoreP(r4, MemOperand(r5, -kPointerSize)); -+ -+ // 4. 
Shift arguments and return address one slot down on the stack -+ // (overwriting the original receiver). Adjust argument count to make -+ // the original first argument the new receiver. -+ // r3: actual number of arguments -+ // r4: function -+ // r7: call type (0: JS function, 1: function proxy, 2: non-function) -+ __ bind(&shift_arguments); -+ { Label loop; -+ // Calculate the copy start address (destination). Copy end address is sp. -+ __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2)); -+ __ add(r5, sp, ip); -+ -+ __ bind(&loop); -+ __ LoadP(ip, MemOperand(r5, -kPointerSize)); -+ __ StoreP(ip, MemOperand(r5)); -+ __ subi(r5, r5, Operand(kPointerSize)); -+ __ cmp(r5, sp); -+ __ bne(&loop); -+ // Adjust the actual number of arguments and remove the top element -+ // (which is a copy of the last argument). -+ __ subi(r3, r3, Operand(1)); -+ __ pop(); -+ } -+ -+ // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin, -+ // or a function proxy via CALL_FUNCTION_PROXY. -+ // r3: actual number of arguments -+ // r4: function -+ // r7: call type (0: JS function, 1: function proxy, 2: non-function) -+ { Label function, non_proxy; -+ __ cmpi(r7, Operand::Zero()); -+ __ beq(&function); -+ // Expected number of arguments is 0 for CALL_NON_FUNCTION. -+ __ li(r5, Operand(0, RelocInfo::NONE)); -+ __ SetCallKind(r8, CALL_AS_METHOD); -+ __ cmpi(r7, Operand(1)); -+ __ bne(&non_proxy); -+ -+ __ push(r4); // re-add proxy object as additional argument -+ __ addi(r3, r3, Operand(1)); -+ __ GetBuiltinEntry(r6, Builtins::CALL_FUNCTION_PROXY); -+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), -+ RelocInfo::CODE_TARGET); -+ -+ __ bind(&non_proxy); -+ __ GetBuiltinEntry(r6, Builtins::CALL_NON_FUNCTION); -+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), -+ RelocInfo::CODE_TARGET); -+ __ bind(&function); -+ } -+ -+ // 5b. Get the code to call from the function and check that the number of -+ // expected arguments matches what we're providing. If so, jump -+ // (tail-call) to the code in register r6 without checking arguments. -+ // r3: actual number of arguments -+ // r4: function -+ __ LoadP(r6, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset)); -+ __ LoadWordArith(r5, -+ FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset)); -+#if !defined(V8_TARGET_ARCH_PPC64) -+ __ SmiUntag(r5); -+#endif -+ __ LoadP(r6, FieldMemOperand(r4, JSFunction::kCodeEntryOffset)); -+ __ SetCallKind(r8, CALL_AS_METHOD); -+ __ cmp(r5, r3); // Check formal and actual parameter counts. -+ Label skip; -+ __ beq(&skip); -+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), -+ RelocInfo::CODE_TARGET); -+ -+ __ bind(&skip); -+ ParameterCount expected(0); -+ __ InvokeCode(r6, expected, expected, JUMP_FUNCTION, -+ NullCallWrapper(), CALL_AS_METHOD); -+} -+ -+ -+void Builtins::Generate_FunctionApply(MacroAssembler* masm) { -+ const int kIndexOffset = -5 * kPointerSize; -+ const int kLimitOffset = -4 * kPointerSize; -+ const int kArgsOffset = 2 * kPointerSize; -+ const int kRecvOffset = 3 * kPointerSize; -+ const int kFunctionOffset = 4 * kPointerSize; -+ -+ { -+ FrameScope frame_scope(masm, StackFrame::INTERNAL); -+ -+ __ LoadP(r3, MemOperand(fp, kFunctionOffset)); // get the function -+ __ push(r3); -+ __ LoadP(r3, MemOperand(fp, kArgsOffset)); // get the args array -+ __ push(r3); -+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION); -+ -+ // Check the stack for overflow. We are not trying to catch -+ // interruptions (e.g. 
debug break and preemption) here, so the "real stack -+ // limit" is checked. -+ Label okay; -+ __ LoadRoot(r5, Heap::kRealStackLimitRootIndex); -+ // Make r5 the space we have left. The stack might already be overflowed -+ // here which will cause r5 to become negative. -+ __ sub(r5, sp, r5); -+ // Check if the arguments will overflow the stack. -+ __ SmiToPtrArrayOffset(r0, r3); -+ __ cmp(r5, r0); -+ __ bgt(&okay); // Signed comparison. -+ -+ // Out of stack space. -+ __ LoadP(r4, MemOperand(fp, kFunctionOffset)); -+ __ push(r4); -+ __ push(r3); -+ __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION); -+ // End of stack check. -+ -+ // Push current limit and index. -+ __ bind(&okay); -+ __ push(r3); // limit -+ __ li(r4, Operand(0, RelocInfo::NONE)); // initial index -+ __ push(r4); -+ -+ // Get the receiver. -+ __ LoadP(r3, MemOperand(fp, kRecvOffset)); -+ -+ // Check that the function is a JS function (otherwise it must be a proxy). -+ Label push_receiver; -+ __ LoadP(r4, MemOperand(fp, kFunctionOffset)); -+ __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE); -+ __ bne(&push_receiver); -+ -+ // Change context eagerly to get the right global object if necessary. -+ __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset)); -+ // Load the shared function info while the function is still in r4. -+ __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset)); -+ -+ // Compute the receiver. -+ // Do not transform the receiver for strict mode functions. -+ Label call_to_object, use_global_receiver; -+ __ lwz(r5, FieldMemOperand(r5, SharedFunctionInfo::kCompilerHintsOffset)); -+ __ TestBit(r5, -+#if V8_TARGET_ARCH_PPC64 -+ SharedFunctionInfo::kStrictModeFunction, -+#else -+ SharedFunctionInfo::kStrictModeFunction + kSmiTagSize, -+#endif -+ r0); -+ __ bne(&push_receiver, cr0); -+ -+ // Do not transform the receiver for native functions. -+ __ TestBit(r5, -+#if V8_TARGET_ARCH_PPC64 -+ SharedFunctionInfo::kNative, -+#else -+ SharedFunctionInfo::kNative + kSmiTagSize, -+#endif -+ r0); -+ __ bne(&push_receiver, cr0); -+ -+ // Compute the receiver in non-strict mode. -+ __ JumpIfSmi(r3, &call_to_object); -+ __ LoadRoot(r4, Heap::kNullValueRootIndex); -+ __ cmp(r3, r4); -+ __ beq(&use_global_receiver); -+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex); -+ __ cmp(r3, r4); -+ __ beq(&use_global_receiver); -+ -+ // Check if the receiver is already a JavaScript object. -+ // r3: receiver -+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); -+ __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE); -+ __ bge(&push_receiver); -+ -+ // Convert the receiver to a regular object. -+ // r3: receiver -+ __ bind(&call_to_object); -+ __ push(r3); -+ __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); -+ __ b(&push_receiver); -+ -+ // Use the current global receiver object as the receiver. -+ __ bind(&use_global_receiver); -+ const int kGlobalOffset = -+ Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize; -+ __ LoadP(r3, FieldMemOperand(cp, kGlobalOffset)); -+ __ LoadP(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset)); -+ __ LoadP(r3, FieldMemOperand(r3, kGlobalOffset)); -+ __ LoadP(r3, FieldMemOperand(r3, GlobalObject::kGlobalReceiverOffset)); -+ -+ // Push the receiver. -+ // r3: receiver -+ __ bind(&push_receiver); -+ __ push(r3); -+ -+ // Copy all arguments from the array to the stack. 
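-+    // Each element is fetched via Runtime::kGetProperty, so holes and
-+    // accessors on the arguments object are handled uniformly.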
-+    // Copy all arguments from the array to the stack.
-+    Label entry, loop;
-+    __ LoadP(r3, MemOperand(fp, kIndexOffset));
-+    __ b(&entry);
-+
-+    // Load the current argument from the arguments array and push it to the
-+    // stack.
-+    // r3: current argument index
-+    __ bind(&loop);
-+    __ LoadP(r4, MemOperand(fp, kArgsOffset));
-+    __ push(r4);
-+    __ push(r3);
-+
-+    // Call the runtime to access the property in the arguments array.
-+    __ CallRuntime(Runtime::kGetProperty, 2);
-+    __ push(r3);
-+
-+    // Use inline caching to access the arguments.
-+    __ LoadP(r3, MemOperand(fp, kIndexOffset));
-+    __ AddSmiLiteral(r3, r3, Smi::FromInt(1), r0);
-+    __ StoreP(r3, MemOperand(fp, kIndexOffset));
-+
-+    // Test if the copy loop has finished copying all the elements from the
-+    // arguments object.
-+    __ bind(&entry);
-+    __ LoadP(r4, MemOperand(fp, kLimitOffset));
-+    __ cmp(r3, r4);
-+    __ bne(&loop);
-+
-+    // Invoke the function.
-+    Label call_proxy;
-+    ParameterCount actual(r3);
-+    __ SmiUntag(r3);
-+    __ LoadP(r4, MemOperand(fp, kFunctionOffset));
-+    __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE);
-+    __ bne(&call_proxy);
-+    __ InvokeFunction(r4, actual, CALL_FUNCTION,
-+                      NullCallWrapper(), CALL_AS_METHOD);
-+
-+    frame_scope.GenerateLeaveFrame();
-+    __ addi(sp, sp, Operand(3 * kPointerSize));
-+    __ blr();
-+
-+    // Invoke the function proxy.
-+    __ bind(&call_proxy);
-+    __ push(r4);  // add function proxy as last argument
-+    __ addi(r3, r3, Operand(1));
-+    __ li(r5, Operand(0, RelocInfo::NONE));
-+    __ SetCallKind(r8, CALL_AS_METHOD);
-+    __ GetBuiltinEntry(r6, Builtins::CALL_FUNCTION_PROXY);
-+    __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
-+            RelocInfo::CODE_TARGET);
-+
-+    // Tear down the internal frame and remove function, receiver and args.
-+  }
-+  __ addi(sp, sp, Operand(3 * kPointerSize));
-+  __ blr();
-+}
-+
-+
-+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
-+  __ SmiTag(r3);
-+  __ LoadSmiLiteral(r7, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
-+  __ mflr(r0);
-+  __ push(r0);
-+  __ Push(fp, r7, r4, r3);
-+  __ addi(fp, sp, Operand(3 * kPointerSize));
-+}
-+
-+
-+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
-+  // ----------- S t a t e -------------
-+  //  -- r3 : result being passed through
-+  // -----------------------------------
-+  // Get the number of arguments passed (as a smi), tear down the frame and
-+  // then tear down the parameters.
-+  __ LoadP(r4, MemOperand(fp, -3 * kPointerSize));
-+  __ mr(sp, fp);
-+  __ LoadP(fp, MemOperand(sp));
-+  __ LoadP(r0, MemOperand(sp, kPointerSize));
-+  __ mtlr(r0);
-+  __ SmiToPtrArrayOffset(r0, r4);
-+  __ add(sp, sp, r0);
-+  __ addi(sp, sp, Operand(3 * kPointerSize));  // adjust for receiver + fp + lr
-+}
-+
-+
-+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
-+  // ----------- S t a t e -------------
-+  //  -- r3 : actual number of arguments
-+  //  -- r4 : function (passed through to callee)
-+  //  -- r5 : expected number of arguments
-+  //  -- r6 : code entry to call
-+  //  -- r8 : call kind information
-+  // -----------------------------------
-+
-+  Label invoke, dont_adapt_arguments;
-+
-+  Label enough, too_few;
-+  __ cmp(r3, r5);
-+  __ blt(&too_few);
-+  __ cmpi(r5, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
-+  __ beq(&dont_adapt_arguments);
-+
-+  {  // Enough parameters: actual >= expected
-+    __ bind(&enough);
-+    EnterArgumentsAdaptorFrame(masm);
-+
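The copy-bounds computation that follows is plain pointer arithmetic: start at the incoming receiver slot above the adaptor frame and walk down one slot per expected argument. A standalone sketch of those bounds, assuming a 32-bit build and an arbitrary frame-pointer value (both are illustration-only assumptions):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t kPointerSize = 4;  // assumed 32-bit build
  const uintptr_t fp = 0x7fff0000;   // arbitrary frame pointer, illustration only
  const uintptr_t actual = 5;        // untagged argument counts
  const uintptr_t expected = 3;

  // r3 = fp + actual * kPointerSize + 2 * kPointerSize:
  // skip the saved fp/lr pair so the start lands on the incoming receiver slot.
  uintptr_t copy_start = fp + actual * kPointerSize + 2 * kPointerSize;
  // r5 = copy_start - expected * kPointerSize: the last slot to copy.
  uintptr_t copy_end = copy_start - expected * kPointerSize;

  // The loop pushes [copy_start .. copy_end] inclusive: the receiver plus the
  // first `expected` arguments, one slot per iteration.
  printf("copy %zu slots: %#zx down to %#zx\n",
         (size_t)(expected + 1), (size_t)copy_start, (size_t)copy_end);
  return 0;
}
```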
-+    // Calculate copy start address into r3 and copy end address into r5.
-+    // r3: actual number of arguments as a smi
-+    // r4: function
-+    // r5: expected number of arguments
-+    // r6: code entry to call
-+    __ SmiToPtrArrayOffset(r3, r3);
-+    __ add(r3, r3, fp);
-+    // adjust for return address and receiver
-+    __ addi(r3, r3, Operand(2 * kPointerSize));
-+    __ ShiftLeftImm(r5, r5, Operand(kPointerSizeLog2));
-+    __ sub(r5, r3, r5);
-+
-+    // Copy the arguments (including the receiver) to the new stack frame.
-+    // r3: copy start address
-+    // r4: function
-+    // r5: copy end address
-+    // r6: code entry to call
-+
-+    Label copy;
-+    __ bind(&copy);
-+    __ LoadP(ip, MemOperand(r3, 0));
-+    __ push(ip);
-+    __ cmp(r3, r5);  // Compare before moving to next argument.
-+    __ subi(r3, r3, Operand(kPointerSize));
-+    __ bne(&copy);
-+
-+    __ b(&invoke);
-+  }
-+
-+  {  // Too few parameters: actual < expected
-+    __ bind(&too_few);
-+    EnterArgumentsAdaptorFrame(masm);
-+
-+    // Calculate copy start address into r3; the copy end address is fp.
-+    // r3: actual number of arguments as a smi
-+    // r4: function
-+    // r5: expected number of arguments
-+    // r6: code entry to call
-+    __ SmiToPtrArrayOffset(r3, r3);
-+    __ add(r3, r3, fp);
-+
-+    // Copy the arguments (including the receiver) to the new stack frame.
-+    // r3: copy start address
-+    // r4: function
-+    // r5: expected number of arguments
-+    // r6: code entry to call
-+    Label copy;
-+    __ bind(&copy);
-+    // Adjust load for return address and receiver.
-+    __ LoadP(ip, MemOperand(r3, 2 * kPointerSize));
-+    __ push(ip);
-+    __ cmp(r3, fp);  // Compare before moving to next argument.
-+    __ subi(r3, r3, Operand(kPointerSize));
-+    __ bne(&copy);
-+
-+    // Fill the remaining expected arguments with undefined.
-+    // r4: function
-+    // r5: expected number of arguments
-+    // r6: code entry to call
-+    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
-+    __ ShiftLeftImm(r5, r5, Operand(kPointerSizeLog2));
-+    __ sub(r5, fp, r5);
-+    __ subi(r5, r5, Operand(4 * kPointerSize));  // Adjust for frame.
-+
-+    Label fill;
-+    __ bind(&fill);
-+    __ push(ip);
-+    __ cmp(sp, r5);
-+    __ bne(&fill);
-+  }
-+
-+  // Call the entry point.
-+  __ bind(&invoke);
-+  __ Call(r6);
-+
-+  // Store offset of return address for deoptimizer.
-+  masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
-+
-+  // Exit frame and return.
-+  LeaveArgumentsAdaptorFrame(masm);
-+  __ blr();
-+
-+
-+  // -------------------------------------------
-+  // Don't adapt arguments.
-+  // -------------------------------------------
-+  __ bind(&dont_adapt_arguments);
-+  __ Jump(r6);
-+}
-+
-+
-+#undef __
-+
-+} }  // namespace v8::internal
-+
-+#endif  // V8_TARGET_ARCH_PPC
-diff -up v8-3.14.5.10/src/ppc/codegen-ppc.cc.ppc v8-3.14.5.10/src/ppc/codegen-ppc.cc
---- v8-3.14.5.10/src/ppc/codegen-ppc.cc.ppc	2016-06-07 14:15:45.990393008 -0400
-+++ v8-3.14.5.10/src/ppc/codegen-ppc.cc	2016-06-07 14:15:45.990393008 -0400
-@@ -0,0 +1,492 @@
-+// Copyright 2012 the V8 project authors. All rights reserved.
-+//
-+// Copyright IBM Corp. 2012, 2013. All rights reserved.
-+//
-+// Redistribution and use in source and binary forms, with or without
-+// modification, are permitted provided that the following conditions are
-+// met:
-+//
-+//     * Redistributions of source code must retain the above copyright
-+//       notice, this list of conditions and the following disclaimer.
-+//     * Redistributions in binary form must reproduce the above
-+//       copyright notice, this list of conditions and the following
-+//       disclaimer in the documentation and/or other materials provided
-+//       with the distribution.
-+//     * Neither the name of Google Inc. nor the names of its
-+//       contributors may be used to endorse or promote products derived
-+//       from this software without specific prior written permission.
-+//
-+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+
-+#include "v8.h"
-+
-+#if defined(V8_TARGET_ARCH_PPC)
-+
-+#include "codegen.h"
-+#include "macro-assembler.h"
-+
-+namespace v8 {
-+namespace internal {
-+
-+#define __ ACCESS_MASM(masm)
-+
-+UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
-+  switch (type) {
-+    case TranscendentalCache::SIN: return &sin;
-+    case TranscendentalCache::COS: return &cos;
-+    case TranscendentalCache::TAN: return &tan;
-+    case TranscendentalCache::LOG: return &log;
-+    default: UNIMPLEMENTED();
-+  }
-+  return NULL;
-+}
-+
-+
-+UnaryMathFunction CreateSqrtFunction() {
-+  return &sqrt;
-+}
-+
-+// -------------------------------------------------------------------------
-+// Platform-specific RuntimeCallHelper functions.
-+
-+void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
-+  masm->EnterFrame(StackFrame::INTERNAL);
-+  ASSERT(!masm->has_frame());
-+  masm->set_has_frame(true);
-+}
-+
-+
-+void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
-+  masm->LeaveFrame(StackFrame::INTERNAL);
-+  ASSERT(masm->has_frame());
-+  masm->set_has_frame(false);
-+}
-+
-+
-+// -------------------------------------------------------------------------
-+// Code generators
-+
-+void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-+    MacroAssembler* masm) {
-+  // ----------- S t a t e -------------
-+  //  -- r3 : value
-+  //  -- r4 : key
-+  //  -- r5 : receiver
-+  //  -- lr : return address
-+  //  -- r6 : target map, scratch for subsequent call
-+  //  -- r7 : scratch (elements)
-+  // -----------------------------------
-+  // Set transitioned map.
-+  __ StoreP(r6, FieldMemOperand(r5, HeapObject::kMapOffset), r0);
-+  __ RecordWriteField(r5,
-+                      HeapObject::kMapOffset,
-+                      r6,
-+                      r22,
-+                      kLRHasNotBeenSaved,
-+                      kDontSaveFPRegs,
-+                      EMIT_REMEMBERED_SET,
-+                      OMIT_SMI_CHECK);
-+}
-+
-+
-+void ElementsTransitionGenerator::GenerateSmiToDouble(
-+    MacroAssembler* masm, Label* fail) {
-+  // ----------- S t a t e -------------
-+  //  -- r3 : value
-+  //  -- r4 : key
-+  //  -- r5 : receiver
-+  //  -- lr : return address
-+  //  -- r6 : target map, scratch for subsequent call
-+  //  -- r7 : scratch (elements)
-+  // -----------------------------------
-+  Label loop, entry, convert_hole, gc_required, only_change_map, done;
-+
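GenerateSmiToDouble below unboxes each smi element into a raw IEEE-754 double and writes holes as a NaN sentinel so the double array needs no separate hole bitmap. The same transformation in scalar C++, with a hypothetical 32-bit smi encoding and an arbitrary quiet-NaN payload standing in for V8's hole constants:

```cpp
#include <cstdint>
#include <cstring>
#include <vector>

// Hypothetical 32-bit smi encoding: tag bit 0 clear, value in the upper 31 bits.
static bool IsSmi(uint32_t slot) { return (slot & 1) == 0; }
static int32_t SmiValue(uint32_t slot) { return static_cast<int32_t>(slot) >> 1; }

// Stand-in for the hole sentinel; any quiet-NaN payload works for illustration.
static const uint64_t kHoleNan = 0x7ff7ffffffffffffULL;

static std::vector<uint64_t> ToDoubleArray(const std::vector<uint32_t>& elems) {
  std::vector<uint64_t> out(elems.size());
  for (size_t i = 0; i < elems.size(); ++i) {
    if (IsSmi(elems[i])) {
      double d = SmiValue(elems[i]);   // models ConvertIntToDouble + stfd
      std::memcpy(&out[i], &d, sizeof d);
    } else {
      out[i] = kHoleNan;               // a non-smi element must be the hole
    }
  }
  return out;
}
```

The reverse pass (GenerateDoubleToObject, further down) recognizes the sentinel by inspecting only the upper 32 bits of each element.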
-+  // Check for empty arrays, which only require a map transition and no changes
-+  // to the backing store.
-+  __ LoadP(r7, FieldMemOperand(r5, JSObject::kElementsOffset));
-+  __ CompareRoot(r7, Heap::kEmptyFixedArrayRootIndex);
-+  __ beq(&only_change_map);
-+
-+  // Preserve lr and use r30 as a temporary register.
-+  __ mflr(r0);
-+  __ Push(r0, r30);
-+
-+  __ LoadP(r8, FieldMemOperand(r7, FixedArray::kLengthOffset));
-+  // r7: source FixedArray
-+  // r8: number of elements (smi-tagged)
-+
-+  // Allocate new FixedDoubleArray.
-+  __ SmiToDoubleArrayOffset(r30, r8);
-+  __ addi(r30, r30, Operand(FixedDoubleArray::kHeaderSize + kPointerSize));
-+  __ AllocateInNewSpace(r30, r9, r10, r22, &gc_required, NO_ALLOCATION_FLAGS);
-+  // r9: destination FixedDoubleArray, not tagged as heap object.
-+
-+  // Align the array conveniently for doubles.
-+  // Store a filler value in the unused memory.
-+  Label aligned, aligned_done;
-+  __ andi(r0, r9, Operand(kDoubleAlignmentMask));
-+  __ mov(ip, Operand(masm->isolate()->factory()->one_pointer_filler_map()));
-+  __ beq(&aligned, cr0);
-+  // Store at the beginning of the allocated memory and update the base pointer.
-+  __ StoreP(ip, MemOperand(r9));
-+  __ addi(r9, r9, Operand(kPointerSize));
-+  __ b(&aligned_done);
-+
-+  __ bind(&aligned);
-+  // Store the filler at the end of the allocated memory.
-+  __ subi(r30, r30, Operand(kPointerSize));
-+  __ StorePX(ip, MemOperand(r9, r30));
-+
-+  __ bind(&aligned_done);
-+
-+  // Set destination FixedDoubleArray's length and map.
-+  __ LoadRoot(r22, Heap::kFixedDoubleArrayMapRootIndex);
-+  __ StoreP(r8, MemOperand(r9, FixedDoubleArray::kLengthOffset));
-+  __ StoreP(r22, MemOperand(r9, HeapObject::kMapOffset));
-+
-+  // Update receiver's map.
-+  __ StoreP(r6, FieldMemOperand(r5, HeapObject::kMapOffset), r0);
-+  __ RecordWriteField(r5,
-+                      HeapObject::kMapOffset,
-+                      r6,
-+                      r22,
-+                      kLRHasBeenSaved,
-+                      kDontSaveFPRegs,
-+                      OMIT_REMEMBERED_SET,
-+                      OMIT_SMI_CHECK);
-+  // Replace receiver's backing store with newly created FixedDoubleArray.
-+  __ addi(r6, r9, Operand(kHeapObjectTag));
-+  __ StoreP(r6, FieldMemOperand(r5, JSObject::kElementsOffset), r0);
-+  __ RecordWriteField(r5,
-+                      JSObject::kElementsOffset,
-+                      r6,
-+                      r22,
-+                      kLRHasBeenSaved,
-+                      kDontSaveFPRegs,
-+                      EMIT_REMEMBERED_SET,
-+                      OMIT_SMI_CHECK);
-+
-+  // Prepare for conversion loop.
-+  __ addi(r6, r7, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-+  __ addi(r10, r9, Operand(FixedDoubleArray::kHeaderSize));
-+  __ SmiToDoubleArrayOffset(r9, r8);
-+  __ add(r9, r10, r9);
-+#if V8_TARGET_ARCH_PPC64
-+  __ mov(r7, Operand(kHoleNanInt64));
-+#else
-+  __ mov(r7, Operand(kHoleNanLower32));
-+  __ mov(r8, Operand(kHoleNanUpper32));
-+#endif
-+  // r6: begin of source FixedArray element fields, not tagged
-+  // r7: kHoleNanLower32
-+  // r8: kHoleNanUpper32
-+  // r9: end of destination FixedDoubleArray, not tagged
-+  // r10: begin of FixedDoubleArray element fields, not tagged
-+
-+  __ b(&entry);
-+
-+  __ bind(&only_change_map);
-+  __ StoreP(r6, FieldMemOperand(r5, HeapObject::kMapOffset), r0);
-+  __ RecordWriteField(r5,
-+                      HeapObject::kMapOffset,
-+                      r6,
-+                      r22,
-+                      kLRHasBeenSaved,
-+                      kDontSaveFPRegs,
-+                      OMIT_REMEMBERED_SET,
-+                      OMIT_SMI_CHECK);
-+  __ b(&done);
-+
-+  // Call into runtime if GC is required.
-+  __ bind(&gc_required);
-+  __ Pop(r0, r30);
-+  __ mtlr(r0);
-+  __ b(fail);
-+
-+  // Convert and copy elements.
-+  __ bind(&loop);
-+  __ LoadP(r22, MemOperand(r6));
-+  __ addi(r6, r6, Operand(kPointerSize));
-+  // r22: current element
-+  __ UntagAndJumpIfNotSmi(r22, r22, &convert_hole);
-+
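The `#if __FLOAT_WORD_ORDER` blocks in the loop below exist because a 32-bit core writes the 8-byte hole pattern as two word stores, and which half goes to the lower address depends on the float word order; on big-endian PPC the exponent word comes first. The same idea in standalone C++, using the compiler's `__FLOAT_WORD_ORDER__`/`__ORDER_LITTLE_ENDIAN__` macros (a GCC/Clang convention) rather than glibc's `__FLOAT_WORD_ORDER`:

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

// Store a 64-bit double image as two 32-bit words, mirroring the paired stw
// instructions: the mantissa (lower) and exponent (upper) halves swap
// positions depending on the float word order.
static void StoreHolePattern(void* dst, uint32_t lower, uint32_t upper) {
  uint32_t words[2];
#if defined(__FLOAT_WORD_ORDER__) && __FLOAT_WORD_ORDER__ == __ORDER_LITTLE_ENDIAN__
  words[0] = lower;  // low half at the lower address
  words[1] = upper;
#else
  words[0] = upper;  // big-endian: exponent word at the lower address
  words[1] = lower;
#endif
  std::memcpy(dst, words, sizeof words);
}

int main() {
  uint64_t slot = 0;
  StoreHolePattern(&slot, 0xffffffffu, 0x7ff7ffffu);  // arbitrary NaN payload
  double d;
  std::memcpy(&d, &slot, sizeof d);
  std::printf("pattern reads back as %f (a NaN)\n", d);
  return 0;
}
```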
-+  // Normal smi, convert to double and store.
-+  FloatingPointHelper::ConvertIntToDouble(
-+      masm, r22, d0);
-+  __ stfd(d0, MemOperand(r10, 0));
-+  __ addi(r10, r10, Operand(8));
-+
-+  __ b(&entry);
-+
-+  // Hole found, store the-hole NaN.
-+  __ bind(&convert_hole);
-+  if (FLAG_debug_code) {
-+    // Reload the original element and check that it is the hole.
-+    __ LoadP(r22, MemOperand(r6, -kPointerSize));
-+    __ CompareRoot(r22, Heap::kTheHoleValueRootIndex);
-+    __ Assert(eq, "object found in smi-only array");
-+  }
-+#if V8_TARGET_ARCH_PPC64
-+  __ std(r7, MemOperand(r10, 0));
-+#else
-+#if __FLOAT_WORD_ORDER == __LITTLE_ENDIAN
-+  __ stw(r7, MemOperand(r10, 0));
-+  __ stw(r8, MemOperand(r10, 4));
-+#else
-+  __ stw(r8, MemOperand(r10, 0));
-+  __ stw(r7, MemOperand(r10, 4));
-+#endif
-+#endif
-+  __ addi(r10, r10, Operand(8));
-+
-+  __ bind(&entry);
-+  __ cmp(r10, r9);
-+  __ blt(&loop);
-+
-+  __ Pop(r0, r30);
-+  __ mtlr(r0);
-+  __ bind(&done);
-+}
-+
-+
-+void ElementsTransitionGenerator::GenerateDoubleToObject(
-+    MacroAssembler* masm, Label* fail) {
-+  // ----------- S t a t e -------------
-+  //  -- r3 : value
-+  //  -- r4 : key
-+  //  -- r5 : receiver
-+  //  -- lr : return address
-+  //  -- r6 : target map, scratch for subsequent call
-+  //  -- r7 : scratch (elements)
-+  // -----------------------------------
-+  Label entry, loop, convert_hole, gc_required, only_change_map;
-+
-+  // Check for empty arrays, which only require a map transition and no changes
-+  // to the backing store.
-+  __ LoadP(r7, FieldMemOperand(r5, JSObject::kElementsOffset));
-+  __ CompareRoot(r7, Heap::kEmptyFixedArrayRootIndex);
-+  __ beq(&only_change_map);
-+
-+  __ Push(r6, r5, r4, r3);
-+  __ LoadP(r8, FieldMemOperand(r7, FixedArray::kLengthOffset));
-+  // r7: source FixedDoubleArray
-+  // r8: number of elements (smi-tagged)
-+
-+  // Allocate new FixedArray.
-+  __ li(r3, Operand(FixedDoubleArray::kHeaderSize));
-+  __ SmiToPtrArrayOffset(r0, r8);
-+  __ add(r3, r3, r0);
-+  __ AllocateInNewSpace(r3, r9, r10, r22, &gc_required, NO_ALLOCATION_FLAGS);
-+  // r9: destination FixedArray, not tagged as heap object
-+  // Set destination FixedArray's length and map.
-+  __ LoadRoot(r22, Heap::kFixedArrayMapRootIndex);
-+  __ StoreP(r8, MemOperand(r9, FixedDoubleArray::kLengthOffset));
-+  __ StoreP(r22, MemOperand(r9, HeapObject::kMapOffset));
-+
-+  // Prepare for conversion loop.
-+  __ addi(r7, r7, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
-+  __ addi(r6, r9, Operand(FixedArray::kHeaderSize));
-+  __ addi(r9, r9, Operand(kHeapObjectTag));
-+  __ SmiToPtrArrayOffset(r8, r8);
-+  __ add(r8, r6, r8);
-+  __ LoadRoot(r10, Heap::kTheHoleValueRootIndex);
-+  __ LoadRoot(r22, Heap::kHeapNumberMapRootIndex);
-+  // Walk r7 with explicit offsets; PPC has no post-indexed addressing.
-+  // r6: begin of destination FixedArray element fields, not tagged
-+  // r7: begin of source FixedDoubleArray element fields, not tagged
-+  // r8: end of destination FixedArray, not tagged
-+  // r9: destination FixedArray
-+  // r10: the-hole pointer
-+  // r22: heap number map
-+  __ b(&entry);
-+
-+  // Call into runtime if GC is required.
-+  __ bind(&gc_required);
-+  __ Pop(r6, r5, r4, r3);
-+  __ b(fail);
-+
-+  __ bind(&loop);
-+#if __FLOAT_WORD_ORDER == __LITTLE_ENDIAN
-+  __ lwz(r4, MemOperand(r7, 4));
-+#else
-+  __ lwz(r4, MemOperand(r7));
-+#endif
-+  __ addi(r7, r7, Operand(8));
-+  // r4: current element's upper 32 bit
-+  // r7: address of next element's upper 32 bit
-+  __ Cmpi(r4, Operand(kHoleNanUpper32), r0);
-+  __ beq(&convert_hole);
-+
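Below, each non-hole double is boxed: a HeapNumber is allocated and the raw 8-byte payload is copied into its value field, as one 64-bit move on PPC64 or two word moves (in float-word order) on a 32-bit build. In portable C++ a plain memcpy of the bit image expresses the same copy; the toy layout here is only a stand-in for the real HeapNumber object:

```cpp
#include <cstdint>
#include <cstring>

// Toy box standing in for a HeapNumber: a map word followed by an 8-byte
// IEEE-754 value field (HeapNumber::kValueOffset in the real object).
struct ToyHeapNumber {
  const void* map;
  unsigned char value[8];
};

// Copy the raw 64-bit image; like the ld/std or paired lwz/stw sequences,
// this preserves every bit pattern, NaN payloads included.
static void BoxDouble(ToyHeapNumber* box, const uint64_t* slot) {
  std::memcpy(box->value, slot, sizeof *slot);
}

int main() {
  uint64_t slot;
  double d = 3.25;
  std::memcpy(&slot, &d, sizeof d);
  ToyHeapNumber box = {nullptr, {0}};
  BoxDouble(&box, &slot);
  double out;
  std::memcpy(&out, box.value, sizeof out);
  return out == d ? 0 : 1;  // round-trips exactly
}
```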
-+  // Non-hole double, copy value into a heap number.
-+  __ AllocateHeapNumber(r5, r3, r4, r22, &gc_required);
-+  // r5: new heap number
-+#if V8_TARGET_ARCH_PPC64
-+  __ ld(r3, MemOperand(r7, -8));
-+  __ addi(r4, r5, Operand(-1));  // subtract tag for std
-+  __ std(r3, MemOperand(r4, HeapNumber::kValueOffset));
-+#else
-+#if __FLOAT_WORD_ORDER == __LITTLE_ENDIAN
-+  __ lwz(r3, MemOperand(r7, -8));
-+  __ lwz(r4, MemOperand(r7, -4));
-+  __ stw(r3, FieldMemOperand(r5, HeapNumber::kValueOffset));
-+  __ stw(r4, FieldMemOperand(r5, HeapNumber::kValueOffset+4));
-+#else
-+  __ lwz(r3, MemOperand(r7, -4));
-+  __ lwz(r4, MemOperand(r7, -8));
-+  __ stw(r3, FieldMemOperand(r5, HeapNumber::kValueOffset+4));
-+  __ stw(r4, FieldMemOperand(r5, HeapNumber::kValueOffset));
-+#endif
-+#endif
-+  __ mr(r3, r6);
-+  __ StoreP(r5, MemOperand(r6));
-+  __ addi(r6, r6, Operand(kPointerSize));
-+  __ RecordWrite(r9,
-+                 r3,
-+                 r5,
-+                 kLRHasNotBeenSaved,
-+                 kDontSaveFPRegs,
-+                 EMIT_REMEMBERED_SET,
-+                 OMIT_SMI_CHECK);
-+  __ b(&entry);
-+
-+  // Replace the-hole NaN with the-hole pointer.
-+  __ bind(&convert_hole);
-+  __ StoreP(r10, MemOperand(r6));
-+  __ addi(r6, r6, Operand(kPointerSize));
-+
-+  __ bind(&entry);
-+  __ cmpl(r6, r8);
-+  __ blt(&loop);
-+
-+  __ Pop(r6, r5, r4, r3);
-+  // Replace receiver's backing store with newly created and filled FixedArray.
-+  __ StoreP(r9, FieldMemOperand(r5, JSObject::kElementsOffset), r0);
-+  __ RecordWriteField(r5,
-+                      JSObject::kElementsOffset,
-+                      r9,
-+                      r22,
-+                      kLRHasNotBeenSaved,
-+                      kDontSaveFPRegs,
-+                      EMIT_REMEMBERED_SET,
-+                      OMIT_SMI_CHECK);
-+
-+  __ bind(&only_change_map);
-+  // Update receiver's map.
-+  __ StoreP(r6, FieldMemOperand(r5, HeapObject::kMapOffset), r0);
-+  __ RecordWriteField(r5,
-+                      HeapObject::kMapOffset,
-+                      r6,
-+                      r22,
-+                      kLRHasNotBeenSaved,
-+                      kDontSaveFPRegs,
-+                      OMIT_REMEMBERED_SET,
-+                      OMIT_SMI_CHECK);
-+}
-+
-+
-+// NOTE: this code assumes ip can be used as a scratch register below.
-+void StringCharLoadGenerator::Generate(MacroAssembler* masm,
-+                                       Register string,
-+                                       Register index,
-+                                       Register result,
-+                                       Label* call_runtime) {
-+  // Fetch the instance type of the receiver into result register.
-+  __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
-+  __ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-+
-+  // We need special handling for indirect strings.
-+  Label check_sequential;
-+  __ andi(r0, result, Operand(kIsIndirectStringMask));
-+  __ beq(&check_sequential, cr0);
-+
-+  // Dispatch on the indirect string shape: slice or cons.
-+  Label cons_string;
-+  __ mov(ip, Operand(kSlicedNotConsMask));
-+  __ and_(r0, result, ip, SetRC);
-+  __ beq(&cons_string, cr0);
-+
-+  // Handle slices.
-+  Label indirect_string_loaded;
-+  __ LoadP(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
-+  __ LoadP(string, FieldMemOperand(string, SlicedString::kParentOffset));
-+  __ SmiUntag(ip, result);
-+  __ add(index, index, ip);
-+  __ b(&indirect_string_loaded);
-+
-+  // Handle cons strings.
-+  // Check whether the right hand side is the empty string (i.e. if
-+  // this is really a flat string in a cons string). If that is not
-+  // the case we would rather go to the runtime system now to flatten
-+  // the string.
-+  __ bind(&cons_string);
-+  __ LoadP(result, FieldMemOperand(string, ConsString::kSecondOffset));
-+  __ CompareRoot(result, Heap::kEmptyStringRootIndex);
-+  __ bne(call_runtime);
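The dispatch above reduces indirect strings (slices and flat cons strings) to a direct string plus an adjusted index; anything it cannot reduce goes to the runtime to be flattened. A C++ sketch of that reduction, with illustrative stand-in types rather than V8's string classes:

```cpp
#include <cstddef>

// Stand-ins for V8's string shapes -- illustration only.
struct Str {
  bool is_sliced = false;
  bool is_cons = false;
  Str* parent = nullptr;  // slice: underlying string
  size_t offset = 0;      // slice: start within the parent
  Str* first = nullptr;   // cons: left child
  Str* second = nullptr;  // cons: right child (must be empty to stay inline)
  bool empty = false;
};

// Returns nullptr where the assembly branches to call_runtime (a cons string
// whose right-hand side is non-empty has to be flattened first).
static Str* ResolveIndirect(Str* s, size_t* index) {
  if (s->is_sliced) {  // slice: add the offset, continue in the parent
    *index += s->offset;
    return s->parent;
  }
  if (s->is_cons) {    // cons: only a flat cons (empty rhs) is direct
    return s->second->empty ? s->first : nullptr;
  }
  return s;            // already sequential or external
}
```

After this reduction only sequential and external representations remain, which is exactly the invariant the check_sequential block that follows relies on.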
-+  // Get the first of the two strings and load its instance type.
-+  __ LoadP(string, FieldMemOperand(string, ConsString::kFirstOffset));
-+
-+  __ bind(&indirect_string_loaded);
-+  __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
-+  __ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-+
-+  // Distinguish sequential and external strings. Only these two string
-+  // representations can reach here (slices and flat cons strings have been
-+  // reduced to the underlying sequential or external string).
-+  Label external_string, check_encoding;
-+  __ bind(&check_sequential);
-+  STATIC_ASSERT(kSeqStringTag == 0);
-+  __ andi(r0, result, Operand(kStringRepresentationMask));
-+  __ bne(&external_string, cr0);
-+
-+  // Prepare sequential strings
-+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
-+  __ addi(string,
-+          string,
-+          Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
-+  __ b(&check_encoding);
-+
-+  // Handle external strings.
-+  __ bind(&external_string);
-+  if (FLAG_debug_code) {
-+    // Assert that we do not have a cons or slice (indirect strings) here.
-+    // Sequential strings have already been ruled out.
-+    __ andi(r0, result, Operand(kIsIndirectStringMask));
-+    __ Assert(eq, "external string expected, but not found", cr0);
-+  }
-+  // Rule out short external strings.
-+  STATIC_CHECK(kShortExternalStringTag != 0);
-+  __ andi(r0, result, Operand(kShortExternalStringMask));
-+  __ bne(call_runtime, cr0);
-+  __ LoadP(string,
-+           FieldMemOperand(string, ExternalString::kResourceDataOffset));
-+
-+  Label ascii, done;
-+  __ bind(&check_encoding);
-+  STATIC_ASSERT(kTwoByteStringTag == 0);
-+  __ andi(r0, result, Operand(kStringEncodingMask));
-+  __ bne(&ascii, cr0);
-+  // Two-byte string.
-+  __ ShiftLeftImm(result, index, Operand(1));
-+  __ lhzx(result, MemOperand(string, result));
-+  __ b(&done);
-+  __ bind(&ascii);
-+  // Ascii string.
-+  __ lbzx(result, MemOperand(string, index));
-+  __ bind(&done);
-+}
-+
-+#undef __
-+
-+} }  // namespace v8::internal
-+
-+#endif  // V8_TARGET_ARCH_PPC
-diff -up v8-3.14.5.10/src/ppc/codegen-ppc.h.ppc v8-3.14.5.10/src/ppc/codegen-ppc.h
---- v8-3.14.5.10/src/ppc/codegen-ppc.h.ppc	2016-06-07 14:15:45.990393008 -0400
-+++ v8-3.14.5.10/src/ppc/codegen-ppc.h	2016-06-07 14:15:45.990393008 -0400
-@@ -0,0 +1,96 @@
-+// Copyright 2011 the V8 project authors. All rights reserved.
-+//
-+// Copyright IBM Corp. 2012, 2013. All rights reserved.
-+//
-+// Redistribution and use in source and binary forms, with or without
-+// modification, are permitted provided that the following conditions are
-+// met:
-+//
-+//     * Redistributions of source code must retain the above copyright
-+//       notice, this list of conditions and the following disclaimer.
-+//     * Redistributions in binary form must reproduce the above
-+//       copyright notice, this list of conditions and the following
-+//       disclaimer in the documentation and/or other materials provided
-+//       with the distribution.
-+//     * Neither the name of Google Inc. nor the names of its
-+//       contributors may be used to endorse or promote products derived
-+//       from this software without specific prior written permission.
-+//
-+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-+// A PARTICULAR PURPOSE ARE DISCLAIMED.
-+// IN NO EVENT SHALL THE COPYRIGHT
-+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-+
-+#ifndef V8_PPC_CODEGEN_PPC_H_
-+#define V8_PPC_CODEGEN_PPC_H_
-+
-+#include "ast.h"
-+#include "ic-inl.h"
-+
-+namespace v8 {
-+namespace internal {
-+
-+// Forward declarations
-+class CompilationInfo;
-+
-+enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
-+
-+// -------------------------------------------------------------------------
-+// CodeGenerator
-+
-+class CodeGenerator: public AstVisitor {
-+ public:
-+  static bool MakeCode(CompilationInfo* info);
-+
-+  // Printing of AST, etc. as requested by flags.
-+  static void MakeCodePrologue(CompilationInfo* info);
-+
-+  // Allocate and install the code.
-+  static Handle<Code> MakeCodeEpilogue(MacroAssembler* masm,
-+                                       Code::Flags flags,
-+                                       CompilationInfo* info);
-+
-+  // Print the code after compiling it.
-+  static void PrintCode(Handle<Code> code, CompilationInfo* info);
-+
-+  static bool ShouldGenerateLog(Expression* type);
-+
-+  static void SetFunctionInfo(Handle<JSFunction> fun,
-+                              FunctionLiteral* lit,
-+                              bool is_toplevel,
-+                              Handle