//===----------------------------------------------------------*- swift -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
///
/// This file contains Swift wrappers for functions defined in the C++ runtime.
///
//===----------------------------------------------------------------------===//

import SwiftShims

//===----------------------------------------------------------------------===//
// Atomics
//===----------------------------------------------------------------------===//

public typealias _PointerToPointer = UnsafeMutablePointer<UnsafeRawPointer?>

@_transparent
public // @testable
func _stdlib_atomicCompareExchangeStrongPtr(
  object target: _PointerToPointer,
  expected: _PointerToPointer,
  desired: UnsafeRawPointer?) -> Bool {

  // We use Builtin.Word here because Builtin.RawPointer can't be nil.
  let (oldValue, won) = Builtin.cmpxchg_seqcst_seqcst_Word(
    target._rawValue,
    UInt(bitPattern: expected.pointee)._builtinWordValue,
    UInt(bitPattern: desired)._builtinWordValue)
  expected.pointee = UnsafeRawPointer(bitPattern: Int(oldValue))
  return Bool(won)
}

% for optional in ['', '?']:
/// Atomic compare and exchange of `UnsafeMutablePointer<T>` with sequentially
/// consistent memory ordering. Precise semantics are defined in C++11 or C11.
///
/// - Warning: This operation is extremely tricky to use correctly because of
///   writeback semantics.
///
/// It is best to use it directly on an
/// `UnsafeMutablePointer<UnsafeMutablePointer<T>>` that is known to point
/// directly to the memory where the value is stored.
///
/// In a call like this:
///
///     _stdlib_atomicCompareExchangeStrongPtr(&foo.property1.property2, ...)
///
/// you need to manually make sure that:
///
/// - all properties in the chain are physical (to make sure that no writeback
///   happens; the compare-and-exchange instruction should operate on the
///   shared memory); and
///
/// - the shared memory that you are accessing is located inside a heap
///   allocation (a class instance property, a `_HeapBuffer`, a pointer to
///   an `Array` element, etc.).
///
/// If the conditions above are not met, the code will still compile, but the
/// compare-and-exchange instruction will operate on the writeback buffer, and
/// you will get a *race* while doing writeback into shared memory.
@_transparent
public // @testable
func _stdlib_atomicCompareExchangeStrongPtr<T>(
  object target: UnsafeMutablePointer<UnsafeMutablePointer<T>${optional}>,
  expected: UnsafeMutablePointer<UnsafeMutablePointer<T>${optional}>,
  desired: UnsafeMutablePointer<T>${optional}) -> Bool {
  return _stdlib_atomicCompareExchangeStrongPtr(
    object: unsafeBitCast(target, to: _PointerToPointer.self),
    expected: unsafeBitCast(expected, to: _PointerToPointer.self),
    desired: unsafeBitCast(desired, to: Optional<UnsafeRawPointer>.self))
}
% end # optional
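
// A minimal usage sketch (not part of the original file), assuming a class
// whose stored property is the pointer being updated, so that the CAS
// operates directly on heap memory as the doc comment above requires.
// `_Cell` and `publish` are hypothetical names, for illustration only.
//
//     final class _Cell {
//       var value: UnsafeMutablePointer<Int>? = nil
//     }
//
//     func publish(_ cell: _Cell, _ new: UnsafeMutablePointer<Int>) {
//       let slot = _getUnsafePointerToStoredProperties(cell)
//         .assumingMemoryBound(to: Optional<UnsafeMutablePointer<Int>>.self)
//       var expected: UnsafeMutablePointer<Int>? = nil
//       // Succeeds only if the slot is still nil; on failure `expected` is
//       // overwritten with the pointer some other thread installed first.
//       _ = _stdlib_atomicCompareExchangeStrongPtr(
//         object: slot, expected: &expected, desired: new)
//     }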

@_transparent
@discardableResult
public // @testable
func _stdlib_atomicInitializeARCRef(
  object target: UnsafeMutablePointer<AnyObject?>,
  desired: AnyObject) -> Bool {
  var expected: UnsafeRawPointer?
  let desiredPtr = Unmanaged.passRetained(desired).toOpaque()
  let wonRace = _stdlib_atomicCompareExchangeStrongPtr(
    object: unsafeBitCast(target, to: _PointerToPointer.self),
    expected: &expected,
    desired: desiredPtr)
  if !wonRace {
    // Some other thread initialized the value. Balance the retain that we
    // performed on 'desired'.
    Unmanaged.passUnretained(desired).release()
  }
  return wonRace
}
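
// A minimal sketch (not part of the original file) of the lazy-initialization
// pattern this function supports: racing threads may each build a candidate,
// but exactly one is published, and the losers' retains are balanced inside
// `_stdlib_atomicInitializeARCRef`. `makeValue` is a hypothetical factory.
//
//     func cachedInstance(
//       in slot: UnsafeMutablePointer<AnyObject?>,
//       makeValue: () -> AnyObject
//     ) -> AnyObject {
//       if let existing = _stdlib_atomicLoadARCRef(object: slot) {
//         return existing
//       }
//       _stdlib_atomicInitializeARCRef(object: slot, desired: makeValue())
//       // Whether or not this thread won, the slot now holds the winner.
//       return _stdlib_atomicLoadARCRef(object: slot)!
//     }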

% for bits in [ 32, 64 ]:

@_transparent
public // @testable
func _stdlib_atomicCompareExchangeStrongUInt${bits}(
  object target: UnsafeMutablePointer<UInt${bits}>,
  expected: UnsafeMutablePointer<UInt${bits}>,
  desired: UInt${bits}) -> Bool {

  let (oldValue, won) = Builtin.cmpxchg_seqcst_seqcst_Int${bits}(
    target._rawValue, expected.pointee._value, desired._value)
  expected.pointee._value = oldValue
  return Bool(won)
}

@_transparent
public // @testable
func _stdlib_atomicCompareExchangeStrongInt${bits}(
  object target: UnsafeMutablePointer<Int${bits}>,
  expected: UnsafeMutablePointer<Int${bits}>,
  desired: Int${bits}) -> Bool {

  let (oldValue, won) = Builtin.cmpxchg_seqcst_seqcst_Int${bits}(
    target._rawValue, expected.pointee._value, desired._value)
  expected.pointee._value = oldValue
  return Bool(won)
}

@_transparent
public // @testable
func _swift_stdlib_atomicStoreUInt${bits}(
  object target: UnsafeMutablePointer<UInt${bits}>,
  desired: UInt${bits}) {

  Builtin.atomicstore_seqcst_Int${bits}(target._rawValue, desired._value)
}

func _swift_stdlib_atomicStoreInt${bits}(
  object target: UnsafeMutablePointer<Int${bits}>,
  desired: Int${bits}) {

  Builtin.atomicstore_seqcst_Int${bits}(target._rawValue, desired._value)
}

public // @testable
func _swift_stdlib_atomicLoadUInt${bits}(
  object target: UnsafeMutablePointer<UInt${bits}>) -> UInt${bits} {

  let value = Builtin.atomicload_seqcst_Int${bits}(target._rawValue)
  return UInt${bits}(value)
}

func _swift_stdlib_atomicLoadInt${bits}(
  object target: UnsafeMutablePointer<Int${bits}>) -> Int${bits} {

  let value = Builtin.atomicload_seqcst_Int${bits}(target._rawValue)
  return Int${bits}(value)
}

% for operation in ['Add', 'And', 'Or', 'Xor']:
// Warning: no overflow checking.
@_transparent
public // @testable
func _swift_stdlib_atomicFetch${operation}UInt${bits}(
  object target: UnsafeMutablePointer<UInt${bits}>,
  operand: UInt${bits}) -> UInt${bits} {

  let value = Builtin.atomicrmw_${operation.lower()}_seqcst_Int${bits}(
    target._rawValue, operand._value)

  return UInt${bits}(value)
}

// Warning: no overflow checking.
func _swift_stdlib_atomicFetch${operation}Int${bits}(
  object target: UnsafeMutablePointer<Int${bits}>,
  operand: Int${bits}) -> Int${bits} {

  let value = Builtin.atomicrmw_${operation.lower()}_seqcst_Int${bits}(
    target._rawValue, operand._value)

  return Int${bits}(value)
}
% end

% end
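
// A minimal sketch (not part of the original file) of the CAS retry loop
// these primitives are designed for: on failure, the `expected` pointee is
// refreshed with the value actually observed, so the loop re-reads for free.
//
//     func incrementViaCAS(_ counter: UnsafeMutablePointer<UInt32>) {
//       var expected = _swift_stdlib_atomicLoadUInt32(object: counter)
//       while !_stdlib_atomicCompareExchangeStrongUInt32(
//         object: counter, expected: &expected, desired: expected &+ 1) {
//         // `expected` now holds the current value; retry with it.
//       }
//     }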

func _stdlib_atomicCompareExchangeStrongInt(
  object target: UnsafeMutablePointer<Int>,
  expected: UnsafeMutablePointer<Int>,
  desired: Int) -> Bool {
#if arch(i386) || arch(arm)
  let (oldValue, won) = Builtin.cmpxchg_seqcst_seqcst_Int32(
    target._rawValue, expected.pointee._value, desired._value)
#elseif arch(x86_64) || arch(arm64) || arch(powerpc64) || arch(powerpc64le) || arch(s390x)
  let (oldValue, won) = Builtin.cmpxchg_seqcst_seqcst_Int64(
    target._rawValue, expected.pointee._value, desired._value)
#endif
  expected.pointee._value = oldValue
  return Bool(won)
}

func _swift_stdlib_atomicStoreInt(
  object target: UnsafeMutablePointer<Int>,
  desired: Int) {
#if arch(i386) || arch(arm)
  Builtin.atomicstore_seqcst_Int32(target._rawValue, desired._value)
#elseif arch(x86_64) || arch(arm64) || arch(powerpc64) || arch(powerpc64le) || arch(s390x)
  Builtin.atomicstore_seqcst_Int64(target._rawValue, desired._value)
#endif
}

@_transparent
public func _swift_stdlib_atomicLoadInt(
  object target: UnsafeMutablePointer<Int>) -> Int {
#if arch(i386) || arch(arm)
  let value = Builtin.atomicload_seqcst_Int32(target._rawValue)
  return Int(value)
#elseif arch(x86_64) || arch(arm64) || arch(powerpc64) || arch(powerpc64le) || arch(s390x)
  let value = Builtin.atomicload_seqcst_Int64(target._rawValue)
  return Int(value)
#endif
}

@_transparent
public // @testable
func _swift_stdlib_atomicLoadPtrImpl(
  object target: UnsafeMutablePointer<OpaquePointer>
) -> OpaquePointer? {
  let value = Builtin.atomicload_seqcst_Word(target._rawValue)
  return OpaquePointer(bitPattern: Int(value))
}

@_transparent
public // @testable
func _stdlib_atomicLoadARCRef(
  object target: UnsafeMutablePointer<AnyObject?>
) -> AnyObject? {
  let result = _swift_stdlib_atomicLoadPtrImpl(
    object: unsafeBitCast(target, to: UnsafeMutablePointer<OpaquePointer>.self))
  if let unwrapped = result {
    return Unmanaged<AnyObject>.fromOpaque(
      UnsafePointer(unwrapped)).takeUnretainedValue()
  }
  return nil
}

% for operation in ['Add', 'And', 'Or', 'Xor']:
// Warning: no overflow checking.
public func _swift_stdlib_atomicFetch${operation}Int(
  object target: UnsafeMutablePointer<Int>,
  operand: Int) -> Int {
#if arch(i386) || arch(arm)
  return Int(Int32(bitPattern:
    _swift_stdlib_atomicFetch${operation}UInt32(
      object: unsafeBitCast(target, to: UnsafeMutablePointer<UInt32>.self),
      operand: UInt32(bitPattern: Int32(operand)))))
#elseif arch(x86_64) || arch(arm64) || arch(powerpc64) || arch(powerpc64le) || arch(s390x)
  return Int(Int64(bitPattern:
    _swift_stdlib_atomicFetch${operation}UInt64(
      object: unsafeBitCast(target, to: UnsafeMutablePointer<UInt64>.self),
      operand: UInt64(bitPattern: Int64(operand)))))
#endif
}
% end
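
// A minimal sketch (not part of the original file): because the signed shims
// round-trip through the unsigned bit pattern, signed overflow wraps like
// two's-complement addition instead of trapping (hence the warning above).
//
//     var x = Int.max
//     withUnsafeMutablePointer(to: &x) { p in
//       // Returns the old value (Int.max); `x` wraps around to Int.min.
//       _ = _swift_stdlib_atomicFetchAddInt(object: p, operand: 1)
//     }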

public final class _stdlib_AtomicInt {
  var _value: Int

  var _valuePtr: UnsafeMutablePointer<Int> {
    return _getUnsafePointerToStoredProperties(self).assumingMemoryBound(
      to: Int.self)
  }

  public init(_ value: Int = 0) {
    _value = value
  }

  public func store(_ desired: Int) {
    return _swift_stdlib_atomicStoreInt(object: _valuePtr, desired: desired)
  }

  public func load() -> Int {
    return _swift_stdlib_atomicLoadInt(object: _valuePtr)
  }

% for operation_name, operation in [ ('Add', '+'), ('And', '&'), ('Or', '|'), ('Xor', '^') ]:
  @discardableResult
  public func fetchAnd${operation_name}(_ operand: Int) -> Int {
    return _swift_stdlib_atomicFetch${operation_name}Int(
      object: _valuePtr,
      operand: operand)
  }

  public func ${operation_name.lower()}AndFetch(_ operand: Int) -> Int {
    return fetchAnd${operation_name}(operand) ${operation} operand
  }
% end

  public func compareExchange(expected: inout Int, desired: Int) -> Bool {
    var expectedVar = expected
    let result = _stdlib_atomicCompareExchangeStrongInt(
      object: _valuePtr,
      expected: &expectedVar,
      desired: desired)
    expected = expectedVar
    return result
  }
}
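
// A minimal usage sketch (not part of the original file): a shared counter
// that can be updated from multiple threads.
//
//     let counter = _stdlib_AtomicInt(0)
//     counter.fetchAndAdd(1)                // returns the old value, here 0
//     _ = counter.addAndFetch(1)            // returns the new value, here 2
//     var seen = counter.load()
//     // Publishes `seen * 2` only if no other thread raced in between; on
//     // failure, `seen` is updated to the value that was actually observed.
//     _ = counter.compareExchange(expected: &seen, desired: seen * 2)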

//===----------------------------------------------------------------------===//
// Conversion of primitive types to `String`
//===----------------------------------------------------------------------===//

/// A 32-byte buffer.
internal struct _Buffer32 {
% for i in range(32):
  internal var _x${i}: UInt8 = 0
% end

  mutating func withBytes<Result>(
    _ body: (UnsafeMutablePointer<UInt8>) throws -> Result
  ) rethrows -> Result
  {
    return try withUnsafeMutablePointer(to: &self) {
      try body(UnsafeMutableRawPointer($0).assumingMemoryBound(to: UInt8.self))
    }
  }
}

/// A 72-byte buffer.
internal struct _Buffer72 {
% for i in range(72):
  internal var _x${i}: UInt8 = 0
% end

  mutating func withBytes<Result>(
    _ body: (UnsafeMutablePointer<UInt8>) throws -> Result
  ) rethrows -> Result
  {
    return try withUnsafeMutablePointer(to: &self) {
      try body(UnsafeMutableRawPointer($0).assumingMemoryBound(to: UInt8.self))
    }
  }
}
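
// A minimal sketch (not part of the original file) of the pattern these
// buffers enable: a fixed-size, stack-allocated scratch area handed to a
// C-style writer as an `UnsafeMutablePointer<UInt8>`.
//
//     var scratch = _Buffer32()
//     let text: String = scratch.withBytes { p in
//       p[0] = UInt8(ascii: "h")
//       p[1] = UInt8(ascii: "i")
//       return String._fromWellFormedCodeUnitSequence(
//         UTF8.self, input: UnsafeBufferPointer(start: p, count: 2))
//     }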

% for bits in [ 32, 64, 80 ]:

% if bits == 80:
#if !os(Windows) && (arch(i386) || arch(x86_64))
% end

@_silgen_name("swift_float${bits}ToString")
func _float${bits}ToStringImpl(
  _ buffer: UnsafeMutablePointer<UTF8.CodeUnit>,
  _ bufferLength: UInt, _ value: Float${bits},
  _ debug: Bool
) -> UInt

func _float${bits}ToString(_ value: Float${bits}, debug: Bool) -> String {

  if !value.isFinite {
    let significand = value.significandBitPattern
    if significand == 0 {
      // Infinity
      return value.sign == .minus ? "-inf" : "inf"
    } else {
      // NaN
      if !debug {
        return "nan"
      }
      let isSignaling = (significand & Float${bits}._quietNaNMask) == 0
      let payload = significand & ((Float${bits}._quietNaNMask >> 1) - 1)
      // FIXME(performance): Inefficient String manipulation. We could move
      // this to a C function.
      return
        (value.sign == .minus ? "-" : "")
        + (isSignaling ? "snan" : "nan")
        + (payload == 0 ? "" : ("(0x" + String(payload, radix: 16) + ")"))
    }
  }

  _sanityCheck(MemoryLayout<_Buffer32>.size == 32)
  _sanityCheck(MemoryLayout<_Buffer72>.size == 72)

  var buffer = _Buffer32()
  return buffer.withBytes { (bufferPtr) in
    let actualLength = _float${bits}ToStringImpl(bufferPtr, 32, value, debug)
    return String._fromWellFormedCodeUnitSequence(
      UTF8.self,
      input: UnsafeBufferPointer(start: bufferPtr, count: Int(actualLength)))
  }
}

% if bits == 80:
#endif
% end

% end
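
// A minimal sketch (not part of the original file) of the non-finite cases
// handled above; the exact payload text depends on how the NaN was built.
//
//     _float64ToString(.infinity, debug: false)   // "inf"
//     _float64ToString(-.infinity, debug: false)  // "-inf"
//     _float64ToString(.nan, debug: false)        // "nan"
//     // With debug: true, a nonzero payload prints in hex, e.g. "nan(0x2a)".
//     _float64ToString(Double(nan: 42, signaling: false), debug: true)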

@_silgen_name("swift_int64ToString")
func _int64ToStringImpl(
  _ buffer: UnsafeMutablePointer<UTF8.CodeUnit>,
  _ bufferLength: UInt, _ value: Int64,
  _ radix: Int64, _ uppercase: Bool
) -> UInt

func _int64ToString(
  _ value: Int64, radix: Int64 = 10, uppercase: Bool = false
) -> String {
  if radix >= 10 {
    var buffer = _Buffer32()
    return buffer.withBytes { (bufferPtr) in
      let actualLength
        = _int64ToStringImpl(bufferPtr, 32, value, radix, uppercase)
      return String._fromWellFormedCodeUnitSequence(
        UTF8.self,
        input: UnsafeBufferPointer(start: bufferPtr, count: Int(actualLength)))
    }
  } else {
    var buffer = _Buffer72()
    return buffer.withBytes { (bufferPtr) in
      let actualLength
        = _int64ToStringImpl(bufferPtr, 72, value, radix, uppercase)
      return String._fromWellFormedCodeUnitSequence(
        UTF8.self,
        input: UnsafeBufferPointer(start: bufferPtr, count: Int(actualLength)))
    }
  }
}
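
// Buffer sizing, as a worked check (not part of the original file): for
// radix >= 10 an Int64 needs at most 19 digits plus a sign, well under 32
// bytes; radix 2 can need 64 digits plus a sign, hence the 72-byte buffer.
//
//     _int64ToString(255)                              // "255"
//     _int64ToString(255, radix: 16)                   // "ff"
//     _int64ToString(255, radix: 16, uppercase: true)  // "FF"
//     _int64ToString(-5, radix: 2)                     // "-101"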

@_silgen_name("swift_uint64ToString")
func _uint64ToStringImpl(
  _ buffer: UnsafeMutablePointer<UTF8.CodeUnit>,
  _ bufferLength: UInt, _ value: UInt64, _ radix: Int64, _ uppercase: Bool
) -> UInt

public // @testable
func _uint64ToString(
  _ value: UInt64, radix: Int64 = 10, uppercase: Bool = false
) -> String {
  if radix >= 10 {
    var buffer = _Buffer32()
    return buffer.withBytes { (bufferPtr) in
      let actualLength
        = _uint64ToStringImpl(bufferPtr, 32, value, radix, uppercase)
      return String._fromWellFormedCodeUnitSequence(
        UTF8.self,
        input: UnsafeBufferPointer(start: bufferPtr, count: Int(actualLength)))
    }
  } else {
    var buffer = _Buffer72()
    return buffer.withBytes { (bufferPtr) in
      let actualLength
        = _uint64ToStringImpl(bufferPtr, 72, value, radix, uppercase)
      return String._fromWellFormedCodeUnitSequence(
        UTF8.self,
        input: UnsafeBufferPointer(start: bufferPtr, count: Int(actualLength)))
    }
  }
}

func _rawPointerToString(_ value: Builtin.RawPointer) -> String {
  var result = _uint64ToString(
    UInt64(
      UInt(bitPattern: UnsafeRawPointer(value))),
    radix: 16,
    uppercase: false
  )
  for _ in 0..<(2 * MemoryLayout<UnsafeRawPointer>.size - result.utf16.count) {
    result = "0" + result
  }
  return "0x" + result
}
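
// A minimal sketch (not part of the original file): the padding loop yields
// a fixed-width address, 16 hex digits on a 64-bit platform. `_rawValue` is
// the stdlib-internal accessor for the underlying Builtin.RawPointer.
//
//     let p = UnsafeRawPointer(bitPattern: 0x1234)!
//     _rawPointerToString(p._rawValue)  // "0x0000000000001234" on 64-bit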

#if _runtime(_ObjC)
// At runtime, these classes are derived from `_SwiftNativeNSXXXBase`,
// which are derived from `NSXXX`.
//
// The @swift_native_objc_runtime_base attribute
// allows us to subclass an Objective-C class and still use the fast Swift
// memory allocator.

@objc @_swift_native_objc_runtime_base(_SwiftNativeNSArrayBase)
class _SwiftNativeNSArray {}

@objc @_swift_native_objc_runtime_base(_SwiftNativeNSDictionaryBase)
class _SwiftNativeNSDictionary {}

@objc @_swift_native_objc_runtime_base(_SwiftNativeNSSetBase)
class _SwiftNativeNSSet {}

@objc @_swift_native_objc_runtime_base(_SwiftNativeNSEnumeratorBase)
class _SwiftNativeNSEnumerator {}

// FIXME(ABI)#60 : move into the Foundation overlay and remove 'open'
@objc @_swift_native_objc_runtime_base(_SwiftNativeNSDataBase)
open class _SwiftNativeNSData {
  public init() {}
}

// FIXME(ABI)#61 : move into the Foundation overlay and remove 'open'
@objc @_swift_native_objc_runtime_base(_SwiftNativeNSCharacterSetBase)
open class _SwiftNativeNSCharacterSet {
  public init() {}
}

//===----------------------------------------------------------------------===//
// Support for reliable testing of the return-autoreleased optimization
//===----------------------------------------------------------------------===//

@objc internal class _stdlib_ReturnAutoreleasedDummy {
  // Use 'dynamic' to force Objective-C dispatch, which uses the
  // return-autoreleased call sequence.
  @objc dynamic func returnsAutoreleased(_ x: AnyObject) -> AnyObject {
    return x
  }

  // Use 'dynamic' to prevent this call from being duplicated into other
  // modules.
  @objc dynamic func initializeReturnAutoreleased() {
    // On x86_64 it is sufficient to perform one cycle of the
    // return-autoreleased call sequence in order to initialize all required
    // PLT entries.
    _ = self.returnsAutoreleased(self)
  }
}

/// This function ensures that the return-autoreleased optimization works.
///
/// On some platforms (for example, x86_64), the first call to
/// `objc_autoreleaseReturnValue` will always autorelease because it would fail
/// to verify the instruction sequence in the caller. On x86_64 certain PLT
/// entries would still be pointing to the resolver function, and sniffing
/// the call sequence would fail.
///
/// This code should live in the core stdlib dylib because PLT tables are
/// separate for each dylib.
///
/// Call this function in a fresh autorelease pool.
public func _stdlib_initializeReturnAutoreleased() {
  // _stdlib_initializeReturnAutoreleasedImpl()
#if arch(x86_64)
  _stdlib_ReturnAutoreleasedDummy().initializeReturnAutoreleased()
#endif
}
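
// A minimal sketch (not part of the original file) of the intended call
// pattern described in the doc comment above: run one priming cycle inside
// a fresh autorelease pool.
//
//     import ObjectiveC
//
//     autoreleasepool {
//       _stdlib_initializeReturnAutoreleased()
//     }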
#else

class _SwiftNativeNSArray {}
class _SwiftNativeNSDictionary {}
class _SwiftNativeNSSet {}

#endif