Diffstat (limited to 'application/basilisk/components/translation/cld2/internal/port.h')
-rw-r--r--  application/basilisk/components/translation/cld2/internal/port.h  128
1 file changed, 0 insertions, 128 deletions
diff --git a/application/basilisk/components/translation/cld2/internal/port.h b/application/basilisk/components/translation/cld2/internal/port.h
deleted file mode 100644
index 077e35a2f..000000000
--- a/application/basilisk/components/translation/cld2/internal/port.h
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2013 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//
-// These are weird things we need to do to get this compiling on
-// random systems [subset].
-
-#ifndef BASE_PORT_H_
-#define BASE_PORT_H_
-
-#include <string.h> // for memcpy()
-#include "integral_types.h"
-
-namespace CLD2 {
-
-// Portable handling of unaligned loads, stores, and copies.
-// On some platforms, like ARM, the copy functions can be more efficient
-// than a load and a store.
-
-#if defined(ARCH_PIII) || defined(ARCH_ATHLON) || defined(ARCH_K8) || defined(_ARCH_PPC)
-
-// x86 and x86-64 can perform unaligned loads/stores directly;
-// modern PowerPC hardware can also do unaligned integer loads and stores;
-// but note: the FPU still sends unaligned loads and stores to a trap handler!
-
-#define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
-#define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
-#define UNALIGNED_LOAD64(_p) (*reinterpret_cast<const uint64 *>(_p))
-
-#define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
-#define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
-#define UNALIGNED_STORE64(_p, _val) (*reinterpret_cast<uint64 *>(_p) = (_val))
-
-#elif defined(__arm__) && \
- !defined(__ARM_ARCH_5__) && \
- !defined(__ARM_ARCH_5T__) && \
- !defined(__ARM_ARCH_5TE__) && \
- !defined(__ARM_ARCH_5TEJ__) && \
- !defined(__ARM_ARCH_6__) && \
- !defined(__ARM_ARCH_6J__) && \
- !defined(__ARM_ARCH_6K__) && \
- !defined(__ARM_ARCH_6Z__) && \
- !defined(__ARM_ARCH_6ZK__) && \
- !defined(__ARM_ARCH_6T2__)
-
-// ARMv7 and newer support native unaligned accesses, but only of 16-bit
-// and 32-bit values (not 64-bit); older versions either raise a fatal signal,
-// do an unaligned read and rotate the words around a bit, or do the reads very
-// slowly (trip through kernel mode). There's no simple #define that says just
-// "ARMv7 or higher", so we have to filter away all ARMv5 and ARMv6
-// sub-architectures. Newer gcc (>= 4.6) sets an __ARM_FEATURE_ALIGNED #define,
-// so in time, maybe we can move on to that.
-//
-// This is a mess, but there's not much we can do about it.
-
-#define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
-#define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
-
-#define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
-#define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
-
-// TODO(sesse): NEON supports unaligned 64-bit loads and stores.
-// See if that would be more efficient on platforms supporting it,
-// at least for copies.
-
-inline uint64 UNALIGNED_LOAD64(const void *p) {
- uint64 t;
- memcpy(&t, p, sizeof t);
- return t;
-}
-
-inline void UNALIGNED_STORE64(void *p, uint64 v) {
- memcpy(p, &v, sizeof v);
-}
-
-#else
-
-#define NEED_ALIGNED_LOADS
-
-// These functions are provided for architectures that don't support
-// unaligned loads and stores.
-
-inline uint16 UNALIGNED_LOAD16(const void *p) {
- uint16 t;
- memcpy(&t, p, sizeof t);
- return t;
-}
-
-inline uint32 UNALIGNED_LOAD32(const void *p) {
- uint32 t;
- memcpy(&t, p, sizeof t);
- return t;
-}
-
-inline uint64 UNALIGNED_LOAD64(const void *p) {
- uint64 t;
- memcpy(&t, p, sizeof t);
- return t;
-}
-
-inline void UNALIGNED_STORE16(void *p, uint16 v) {
- memcpy(p, &v, sizeof v);
-}
-
-inline void UNALIGNED_STORE32(void *p, uint32 v) {
- memcpy(p, &v, sizeof v);
-}
-
-inline void UNALIGNED_STORE64(void *p, uint64 v) {
- memcpy(p, &v, sizeof v);
-}
-
-#endif
-
-} // End namespace CLD2
-
-#endif // BASE_PORT_H_
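
For context (not part of the deleted header): the memcpy-based functions in the NEED_ALIGNED_LOADS branch are the portable, strict-aliasing-safe way to read or write a multi-byte value at an arbitrary byte offset, and since the size is a compile-time constant, compilers typically lower the memcpy to a single load or store on targets that allow unaligned access. The following is a minimal, self-contained sketch of the same pattern written against <cstdint> rather than CLD2's integral_types.h; the helper names read_u32/write_u32 and the sample buffer are hypothetical and do not appear in the deleted file.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Same idea as the fallback branch above: let memcpy perform the
// potentially misaligned access. The size is a compile-time constant,
// so the call is normally inlined to one load/store instruction.
static inline uint32_t read_u32(const void *p) {
  uint32_t t;
  memcpy(&t, p, sizeof t);
  return t;
}

static inline void write_u32(void *p, uint32_t v) {
  memcpy(p, &v, sizeof v);
}

int main() {
  unsigned char buf[8] = {0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88};
  // Offset 1 is not 4-byte aligned; the memcpy pattern is still well-defined,
  // unlike dereferencing a reinterpret_cast'ed uint32_t pointer here.
  uint32_t v = read_u32(buf + 1);
  write_u32(buf + 3, v);
  printf("%08x\n", (unsigned)v);  // exact value depends on host byte order
  return 0;
}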