/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/mips32/MoveEmitter-mips32.h"

#include "jit/MacroAssembler-inl.h"

using namespace js;
using namespace js::jit;
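
// breakCycle() and completeCycle() implement the two halves of cycle
// resolution: breakCycle() spills the operand that is about to be clobbered
// into a per-cycle stack slot (cycleSlot), and completeCycle() later restores
// the saved value into its final destination.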
void
MoveEmitterMIPS::breakCycle(const MoveOperand& from, const MoveOperand& to,
MoveOp::Type type, uint32_t slotId)
{
    // A cycle has the pattern:
    //   (A -> B)
    //   (B -> A)
    //
    // This case handles (A -> B), which we reach first. We save B, then allow
    // the original move to continue.
switch (type) {
case MoveOp::FLOAT32:
if (to.isMemory()) {
FloatRegister temp = ScratchFloat32Reg;
masm.loadFloat32(getAdjustedAddress(to), temp);
                // The completing load may read either word of the slot, so
                // fill both words with the same value.
masm.storeFloat32(temp, cycleSlot(slotId, 0));
masm.storeFloat32(temp, cycleSlot(slotId, 4));
} else {
// Just always store the largest possible size.
masm.storeDouble(to.floatReg().doubleOverlay(), cycleSlot(slotId, 0));
}
break;
case MoveOp::DOUBLE:
if (to.isMemory()) {
FloatRegister temp = ScratchDoubleReg;
masm.loadDouble(getAdjustedAddress(to), temp);
masm.storeDouble(temp, cycleSlot(slotId, 0));
} else {
masm.storeDouble(to.floatReg(), cycleSlot(slotId, 0));
}
break;
case MoveOp::INT32:
MOZ_ASSERT(sizeof(uintptr_t) == sizeof(int32_t));
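        // Fall through: an int32 move is the same as a pointer-sized move on
        // MIPS32.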
case MoveOp::GENERAL:
if (to.isMemory()) {
Register temp = tempReg();
masm.loadPtr(getAdjustedAddress(to), temp);
masm.storePtr(temp, cycleSlot(0, 0));
} else {
            // The second scratch register should not be moved by the MoveEmitter.
MOZ_ASSERT(to.reg() != spilledReg_);
masm.storePtr(to.reg(), cycleSlot(0, 0));
}
break;
default:
MOZ_CRASH("Unexpected move type");
}
}

void
MoveEmitterMIPS::completeCycle(const MoveOperand& from, const MoveOperand& to,
MoveOp::Type type, uint32_t slotId)
{
    // A cycle has the pattern:
    //   (A -> B)
    //   (B -> A)
    //
    // This case handles (B -> A), which we reach last. We emit a move from the
    // saved value of B to A.
switch (type) {
case MoveOp::FLOAT32:
if (to.isMemory()) {
FloatRegister temp = ScratchFloat32Reg;
masm.loadFloat32(cycleSlot(slotId, 0), temp);
masm.storeFloat32(temp, getAdjustedAddress(to));
} else {
uint32_t offset = 0;
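            // The cycle slot holds a full double (see breakCycle()), so select
            // the word of the slot that matches how |from| aliases its double
            // overlay.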
if (from.floatReg().numAlignedAliased() == 1)
offset = sizeof(float);
masm.loadFloat32(cycleSlot(slotId, offset), to.floatReg());
}
break;
case MoveOp::DOUBLE:
if (to.isMemory()) {
FloatRegister temp = ScratchDoubleReg;
masm.loadDouble(cycleSlot(slotId, 0), temp);
masm.storeDouble(temp, getAdjustedAddress(to));
} else {
masm.loadDouble(cycleSlot(slotId, 0), to.floatReg());
}
break;
case MoveOp::INT32:
MOZ_ASSERT(sizeof(uintptr_t) == sizeof(int32_t));
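        // Fall through: an int32 move is the same as a pointer-sized move on
        // MIPS32.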
case MoveOp::GENERAL:
MOZ_ASSERT(slotId == 0);
if (to.isMemory()) {
Register temp = tempReg();
masm.loadPtr(cycleSlot(0, 0), temp);
masm.storePtr(temp, getAdjustedAddress(to));
} else {
            // The second scratch register should not be moved by the MoveEmitter.
MOZ_ASSERT(to.reg() != spilledReg_);
masm.loadPtr(cycleSlot(0, 0), to.reg());
}
break;
default:
MOZ_CRASH("Unexpected move type");
}
}

void
MoveEmitterMIPS::emitDoubleMove(const MoveOperand& from, const MoveOperand& to)
{
    // Ensure that we can use ScratchDoubleReg for memory moves.
MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg() != ScratchDoubleReg);
MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg() != ScratchDoubleReg);
if (from.isFloatReg()) {
if (to.isFloatReg()) {
masm.moveDouble(from.floatReg(), to.floatReg());
} else if (to.isGeneralRegPair()) {
            // Used for passing a double parameter in the a2/a3 register pair.
            // Two moves are added for one double parameter by
            // MacroAssembler::passABIArg.
MOZ_ASSERT(to.evenReg() == a2 && to.oddReg() == a3,
"Invalid emitDoubleMove arguments.");
masm.moveFromDoubleLo(from.floatReg(), a2);
masm.moveFromDoubleHi(from.floatReg(), a3);
} else {
MOZ_ASSERT(to.isMemory());
masm.storeDouble(from.floatReg(), getAdjustedAddress(to));
}
} else if (to.isFloatReg()) {
MOZ_ASSERT(from.isMemory());
masm.loadDouble(getAdjustedAddress(from), to.floatReg());
} else if (to.isGeneralRegPair()) {
        // Used for passing a double parameter in the a2/a3 register pair.
        // Two moves are added for one double parameter by
        // MacroAssembler::passABIArg.
MOZ_ASSERT(from.isMemory());
MOZ_ASSERT(to.evenReg() == a2 && to.oddReg() == a3,
"Invalid emitDoubleMove arguments.");
masm.loadPtr(getAdjustedAddress(from), a2);
masm.loadPtr(Address(from.base(), getAdjustedOffset(from) + sizeof(uint32_t)), a3);
} else {
MOZ_ASSERT(from.isMemory());
MOZ_ASSERT(to.isMemory());
masm.loadDouble(getAdjustedAddress(from), ScratchDoubleReg);
masm.storeDouble(ScratchDoubleReg, getAdjustedAddress(to));
}
}