Commit Graph

Chris Lattner
dc9e21ed89 implement support for conditional destruction of tuple elements.
A testcase like this:

func test(cond : Bool) {
  var x : (SomeClass, SomeClass)

  if cond {
    x.0 = getSomeClass()
  } else {
    x.1 = getSomeClass() 
  }
}

now ends up with an epilog to destroy "x" that looks like this:

  %1 = builtin_function_ref "lshr_Int2" : $@thin @callee_owned (Builtin.Int2, Builtin.Int2) -> Builtin.Int2 // user: %37
  %2 = builtin_function_ref "trunc_Int2_Int1" : $@thin @callee_owned (Builtin.Int2) -> Builtin.Int1 // users: %38, %31
...
  %30 = load %4#1 : $*Builtin.Int2                // users: %37, %31
  %31 = apply %2(%30) : $@thin @callee_owned (Builtin.Int2) -> Builtin.Int1 // user: %32
  cond_br %31, bb4, bb5                           // id: %32

bb4:                                              // Preds: bb3
  %33 = tuple_element_addr %7#1 : $*(SomeClass, SomeClass), 0 // user: %34
  destroy_addr %33 : $*SomeClass                  // id: %34
  br bb5                                          // id: %35

bb5:                                              // Preds: bb3 bb4
  %36 = integer_literal $Builtin.Int2, 1          // user: %37
  %37 = apply %1(%30, %36) : $@thin @callee_owned (Builtin.Int2, Builtin.Int2) -> Builtin.Int2 // user: %38
  %38 = apply %2(%37) : $@thin @callee_owned (Builtin.Int2) -> Builtin.Int1 // user: %39
  cond_br %38, bb6, bb7                           // id: %39

bb6:                                              // Preds: bb5
  %40 = tuple_element_addr %7#1 : $*(SomeClass, SomeClass), 1 // user: %41
  destroy_addr %40 : $*SomeClass                  // id: %41
  br bb7                                          // id: %42

bb7:                                              // Preds: bb5 bb6
  dealloc_stack %7#0 : $*@local_storage (SomeClass, SomeClass) // id: %43
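
In source-level terms, the epilog tests one bit of a control variable per
tuple element and destroys only the elements whose bit is set. A minimal
Swift sketch of that logic (illustrative only - the Optional assignments
stand in for destroy_addr, and SomeClass/getSomeClass are the placeholders
from the testcase):

class SomeClass {}
func getSomeClass() -> SomeClass { return SomeClass() }

func sketch(cond: Bool) {
  var live: UInt8 = 0b00                   // two-bit liveness mask, one bit per element
  var x: (SomeClass?, SomeClass?) = (nil, nil)

  if cond {
    x.0 = getSomeClass()
    live |= 0b01                           // element 0 initialized
  } else {
    x.1 = getSomeClass()
    live |= 0b10                           // element 1 initialized
  }

  // Epilog, mirroring bb4-bb7 above:
  if live & 0b01 != 0 { x.0 = nil }        // trunc of bit 0, conditional destroy of x.0
  if (live >> 1) & 0b01 != 0 { x.1 = nil } // lshr by 1 + trunc, conditional destroy of x.1
}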


Swift SVN r10701
2013-12-01 05:22:05 +00:00
Chris Lattner
b84fd18419 implement support for full conditional init/assign processing of tuple elements,
generating the appropriate shifting and or'ing of bits in our liveness mask to
treat each tuple element separately.  This allows us to compile something like:

func test(cond : Bool) {
  var x : (SomeClass, SomeClass)

  if cond {
    x.0 = getSomeClass()
  } else {
    x.1 = getSomeClass() 
  }

  x.0 = getSomeClass()
  x.1 = x.0
}

into:

sil @_T1t4testFT4condSb_T_ : $@thin (Bool) -> () {
bb0(%0 : $Bool):
  %1 = builtin_function_ref "or_Int2" : $@thin @callee_owned (Builtin.Int2, Builtin.Int2) -> Builtin.Int2 // users: %40, %24, %15, %57
  %2 = alloc_stack $Builtin.Int2  // var x        // users: %41, %39, %25, %23, %16, %14, %58, %56, %45, %31, %7, %64
  %3 = alloc_stack $Bool  // var cond             // users: %62, %4
  store %0 to %3#1 : $*Bool                       // id: %4
  %5 = alloc_stack $(SomeClass, SomeClass)  // var x // users: %52, %35, %61, %60, %12, %21, %30, %43, %44
  %6 = integer_literal $Builtin.Int2, 0           // user: %7
  store %6 to %2#1 : $*Builtin.Int2               // id: %7
  %8 = struct_extract %0 : $Bool, #value          // user: %9
  cond_br %8, bb1, bb2                            // id: %9

bb1:                                              // Preds: bb0
  // function_ref t.getSomeClass () -> t.SomeClass
  %10 = function_ref @_T1t12getSomeClassFT_CS_9SomeClass : $@thin () -> @owned SomeClass // user: %11
  %11 = apply %10() : $@thin () -> @owned SomeClass // user: %17
  %12 = tuple_element_addr %5#1 : $*(SomeClass, SomeClass), 0 // user: %17
  %13 = integer_literal $Builtin.Int2, 1          // user: %15
  %14 = load %2#1 : $*Builtin.Int2                // user: %15
  %15 = apply %1(%14, %13) : $@thin @callee_owned (Builtin.Int2, Builtin.Int2) -> Builtin.Int2 // user: %16
  store %15 to %2#1 : $*Builtin.Int2              // id: %16
  store %11 to %12 : $*SomeClass                  // id: %17
  br bb3                                          // id: %18

bb2:                                              // Preds: bb0
  // function_ref t.getSomeClass () -> t.SomeClass
  %19 = function_ref @_T1t12getSomeClassFT_CS_9SomeClass : $@thin () -> @owned SomeClass // user: %20
  %20 = apply %19() : $@thin () -> @owned SomeClass // user: %26
  %21 = tuple_element_addr %5#1 : $*(SomeClass, SomeClass), 1 // user: %26
  %22 = integer_literal $Builtin.Int2, -2         // user: %24
  %23 = load %2#1 : $*Builtin.Int2                // user: %24
  %24 = apply %1(%23, %22) : $@thin @callee_owned (Builtin.Int2, Builtin.Int2) -> Builtin.Int2 // user: %25
  store %24 to %2#1 : $*Builtin.Int2              // id: %25
  store %20 to %21 : $*SomeClass                  // id: %26
  br bb3                                          // id: %27

bb3:                                              // Preds: bb2 bb1
  // function_ref t.getSomeClass () -> t.SomeClass
  %28 = function_ref @_T1t12getSomeClassFT_CS_9SomeClass : $@thin () -> @owned SomeClass // user: %29
  %29 = apply %28() : $@thin () -> @owned SomeClass // user: %42
  %30 = tuple_element_addr %5#1 : $*(SomeClass, SomeClass), 0 // user: %42
  %31 = load %2#1 : $*Builtin.Int2                // user: %33
  %32 = builtin_function_ref "trunc_Int2_Int1" : $@thin @callee_owned (Builtin.Int2) -> Builtin.Int1 // user: %33
  %33 = apply %32(%31) : $@thin @callee_owned (Builtin.Int2) -> Builtin.Int1 // user: %34
  cond_br %33, bb4, bb5                           // id: %34

bb4:                                              // Preds: bb3
  %35 = tuple_element_addr %5#1 : $*(SomeClass, SomeClass), 0 // user: %36
  destroy_addr %35 : $*SomeClass                  // id: %36
  br bb5                                          // id: %37

bb5:                                              // Preds: bb3 bb4
  %38 = integer_literal $Builtin.Int2, 1          // user: %40
  %39 = load %2#1 : $*Builtin.Int2                // user: %40
  %40 = apply %1(%39, %38) : $@thin @callee_owned (Builtin.Int2, Builtin.Int2) -> Builtin.Int2 // user: %41
  store %40 to %2#1 : $*Builtin.Int2              // id: %41
  store %29 to %30 : $*SomeClass                  // id: %42
  %43 = tuple_element_addr %5#1 : $*(SomeClass, SomeClass), 0 // user: %59
  %44 = tuple_element_addr %5#1 : $*(SomeClass, SomeClass), 1 // user: %59
  %45 = load %2#1 : $*Builtin.Int2                // user: %48
  %46 = builtin_function_ref "lshr_Int2" : $@thin @callee_owned (Builtin.Int2, Builtin.Int2) -> Builtin.Int2 // user: %48
  %47 = integer_literal $Builtin.Int2, 1          // user: %48
  %48 = apply %46(%45, %47) : $@thin @callee_owned (Builtin.Int2, Builtin.Int2) -> Builtin.Int2 // user: %50
  %49 = builtin_function_ref "trunc_Int2_Int1" : $@thin @callee_owned (Builtin.Int2) -> Builtin.Int1 // user: %50
  %50 = apply %49(%48) : $@thin @callee_owned (Builtin.Int2) -> Builtin.Int1 // user: %51
  cond_br %50, bb6, bb7                           // id: %51

bb6:                                              // Preds: bb5
  %52 = tuple_element_addr %5#1 : $*(SomeClass, SomeClass), 1 // user: %53
  destroy_addr %52 : $*SomeClass                  // id: %53
  br bb7                                          // id: %54

bb7:                                              // Preds: bb5 bb6
  %55 = integer_literal $Builtin.Int2, -2         // user: %57
  %56 = load %2#1 : $*Builtin.Int2                // user: %57
  %57 = apply %1(%56, %55) : $@thin @callee_owned (Builtin.Int2, Builtin.Int2) -> Builtin.Int2 // user: %58
  store %57 to %2#1 : $*Builtin.Int2              // id: %58
  copy_addr %43 to [initialization] %44 : $*SomeClass // id: %59
  destroy_addr %5#1 : $*(SomeClass, SomeClass)    // id: %60
  dealloc_stack %5#0 : $*@local_storage (SomeClass, SomeClass) // id: %61
  dealloc_stack %3#0 : $*@local_storage Bool      // id: %62
  %63 = tuple ()                                  // user: %65
  dealloc_stack %2#0 : $*@local_storage Builtin.Int2 // id: %64
  return %63 : $()                                // id: %65
}

Which ends up producing this LLVM IR at -O3:

define void @_T1t4testFT4condSb_T_(i1) #0 {
entry:
  %1 = tail call noalias %swift.refcounted* @swift_allocObject(...
  %2 = tail call noalias %swift.refcounted* @swift_allocObject(...
  tail call void @swift_release(%swift.refcounted* %2) #0
  tail call void @swift_release(%swift.refcounted* %1) #0
  ret void
}
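
The store to x.0 in bb3-bb5 above is the interesting part: SILGen could not
tell whether it is an init or an assign, so the generated code tests bit 0
of the mask, destroys the old value only if it was live, performs the store,
and then ors the bit back in. A hedged Swift model of that store protocol
(names invented for illustration):

class SomeClass {}
func getSomeClass() -> SomeClass { return SomeClass() }

// Model of a store to tuple element 0 that may be an init or an assign:
func storeElement0(_ newValue: SomeClass,
                   into x: inout (SomeClass?, SomeClass?),
                   mask: inout UInt8) {
  if mask & 0b01 != 0 {                    // trunc of bit 0: was element 0 live?
    x.0 = nil                              // yes: destroy the old value first (assign case)
  }
  x.0 = newValue                           // the store itself is now always an init
  mask |= 0b01                             // record that element 0 is live
}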



Swift SVN r10700
2013-12-01 05:10:48 +00:00
Chris Lattner
f0f997a391 rework the conditional init/assign algorithm to generate destroy_addrs instead
of generating a CFG diamond with a copy of the store (in init and assign forms)
on each arm.  This generalizes better to operations that touch multiple
tuple elements.
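
In sketch form, the difference between the two lowerings (hypothetical
helper names; each helper stands for a SIL operation on one tuple element):

// Helpers standing in for SIL operations:
func destroyOldValue() { /* destroy_addr of the element */ }
func storeNewValue() { /* initializing store to the element */ }

// Old scheme: a CFG diamond with the store duplicated on both arms.
func diamondLowering(isLive: Bool) {
  if isLive {
    destroyOldValue()
    storeNewValue()                        // "assign" arm
  } else {
    storeNewValue()                        // "init" arm
  }
}

// New scheme: one conditional destroy_addr, then a single store.
// Only the destroy is conditional, so it composes cleanly when an
// operation touches several tuple elements at once.
func destroyAddrLowering(isLive: Bool) {
  if isLive { destroyOldValue() }
  storeNewValue()
}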


Swift SVN r10698
2013-12-01 02:40:01 +00:00
Chris Lattner
1a612b6d13 rewrite the conditional destroy logic to use the same control variable
as is used by conditional inits.  This allows some simplifications and sharing
of concepts, and makes sure that we emit at most one control variable for each
memory object being DI'd.


Swift SVN r10688
2013-11-30 00:19:35 +00:00
Chris Lattner
583d93fd3d Implement support for DI emitting a conditional control variable that tracks the
liveness of a memory object throughout the flow graph.  This implements support
for conditional liveness of general types (e.g. the testcase), but top-level
tuples are not handled properly yet.  This is progress toward implementing
rdar://15530750.


Swift SVN r10687
2013-11-29 23:38:29 +00:00
Chris Lattner
b312116ee9 adopt IsInitialization_t in LowerAssignInstruction,
and inline handleInconsistentInitOrAssign into its
caller, since it will remain simple.  Also, add an
assert to make sure that handleStoreUse covers all
instructions that can occur in it.  NFC.


Swift SVN r10685
2013-11-29 16:57:57 +00:00
Chris Lattner
eeb1b16830 now that things are rearranged to our liking, introduce a new function
to handle stores that are either an assign or an init, depending on the
control flow leading to them.  In the general case, this requires
inserting a diamond in the CFG.  For now, we whitelist trivial types,
since init and assign are the same for them.  This fixes rdar://15530750
for trivial types.
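
For a trivial type there is nothing to destroy, so the "assign" and "init"
forms of a store are the same plain store and no diamond is needed. A small
example of the shape of case this fixes (my reconstruction, not taken from
the commit):

func test(cond: Bool) {
  var x: Int                // trivial type: no cleanup of any kind
  if cond {
    x = 1                   // x is initialized only on this path
  }
  x = 2                     // init if cond was false, assign if it was true;
                            // for Int both lower to the same store, so no
                            // CFG diamond is required
  _ = x
}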


Swift SVN r10684
2013-11-29 07:09:35 +00:00
Chris Lattner
4a50ba45e3 further improve classification: coming out of SILGen, we don't know if
anything is a proper reassignment; we only know that it could be either
InitOrAssign or Init.  Classify stores as such, and then have DI reclassify
a store as Assign when it is clearly an overwrite.  This allows later
iterations to avoid reanalyzing generated instructions and allows more
precise reasoning about ambiguous initializations.
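
A small Swift example of how the refined classification plays out (the
labels in the comments use the names from this commit; which store gets
which label is my reading of the description):

class SomeClass {}
func getSomeClass() -> SomeClass { return SomeClass() }

func test(cond: Bool) {
  var x: SomeClass
  if cond {
    x = getSomeClass()      // x provably uninitialized here: DI classifies as Init
  }
  x = getSomeClass()        // init on one path, overwrite on the other: stays InitOrAssign
  x = getSomeClass()        // x provably initialized here: DI reclassifies as Assign
  _ = x
}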



Swift SVN r10683
2013-11-29 06:44:15 +00:00
Chris Lattner
8e1b68e4e8 refactor a "InsertCFGDiamond" function out of the conditional destroy logic. NFC.
Swift SVN r10682
2013-11-28 21:28:31 +00:00
Chris Lattner
3e20621708 introduce a new store classification "InitOrAssign", since for trivial-type
stores coming out of SILGen it is unknown whether they are inits or assigns.
No functionality change.



Swift SVN r10681
2013-11-28 20:58:55 +00:00
Chris Lattner
510ed4ccfa Introduce a new use classifier: "Initialization". This allows us to
tag things known from SILGen to be initializers (e.g., copyaddrs with
the bit set), and when we analyze the DI properties of an assign, we
can move it to this classification.  This allows stuff that wants to
reason about the difference (e.g. the conditional destroy logic) to
do so precisely.


Swift SVN r10658
2013-11-22 06:17:32 +00:00
Chris Lattner
795656f3f7 Have DI generate control variables and conditional destruction
logic for destroy_addrs of memory that is only live on some 
paths.  This finally wraps up rdar://15209088.

The final missing DI feature for local/global variables is now the
conditional "assign" vs "init" case.



Swift SVN r10654
2013-11-22 00:48:42 +00:00
Chris Lattner
b8e7ca2996 Only check elements that we care about. This is only a cleanup, no
functionality change.


Swift SVN r10630
2013-11-21 07:28:54 +00:00
Chris Lattner
1f55287ad2 teach definite init to correctly handle releases of tuples where
each element has a known yes/no liveness, as a generalization of the
"none are alive" logic.  Now the only piece missing is the
flow-sensitive liveness case.

It is pretty nice that DI is able to transform the testcase into this
simple logic:

sil @release_some_constructed : $@thin () -> () {
bb0:
  %0 = tuple ()
  %1 = function_ref @getSomeClass : $@thin () -> @owned SomeClass // user: %2
  %2 = apply %1() : $@thin () -> @owned SomeClass // user: %3
  strong_release %2 : $SomeClass                  // id: %3
  %4 = tuple ()                                   // user: %5
  return %4 : $()                                 // id: %5
}
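
A plausible Swift-level testcase behind that SIL (reconstructed - the commit
doesn't show the source, and the element-wise initialization is my guess at
what exercises the per-element yes/no liveness):

class SomeClass {}
func getSomeClass() -> SomeClass { return SomeClass() }

func release_some_constructed() {
  var x: (SomeClass, SomeClass)
  x.0 = getSomeClass()
  // x.1 is never initialized, so the liveness vector at scope exit is
  // statically (yes, no): DI lowers the destruction of x to a single
  // strong_release of the value stored into x.0, as shown above.
}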


Swift SVN r10629
2013-11-21 07:20:56 +00:00
Chris Lattner
2c109ff75d Rename checkDefinitelyInit -> getLivenessAtUse, and split it
into two methods: one that determines the liveness of a set of
elements at a point in the CFG, and one that merges the result
across the elements used by a particular access.  getLivenessAtUse
is useful for traditional DI properties, but the vector-returning
one is important for analyzing the liveness at a release.  Still no
functionality change.


Swift SVN r10625
2013-11-21 05:46:21 +00:00
Chris Lattner
78cc047f5b improve encapsulation by introducing a new AvailabilitySet type
that wraps an llvm::SmallVector of bit pairs, centralizing the logic
for working on it and abstracting the bit-pair encoding.  Also
add a comment describing what this is doing in lattice-theoretic
terms.
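
A hedged Swift sketch of the idea (the real AvailabilitySet is C++ over an
llvm::SmallVector, and this particular bit-pair encoding and merge rule are
illustrative, not the compiler's):

// Per-element availability; two bits per element in the packed original.
enum Availability: UInt8 {
  case unknown = 0b00     // optimistic "not yet computed" top of the lattice
  case yes     = 0b01     // definitely initialized
  case no      = 0b10     // definitely uninitialized
  case partial = 0b11     // initialized on only some paths (the bottom)
}

struct AvailabilitySet {
  private var states: [Availability]

  init(elementCount: Int) {
    states = Array(repeating: .unknown, count: elementCount)
  }

  subscript(element: Int) -> Availability {
    get { return states[element] }
    set { states[element] = newValue }
  }

  // Lattice meet at a CFG merge point: agreement passes through, unknown
  // defers to the other side, and any disagreement becomes partial.
  mutating func merge(with other: AvailabilitySet) {
    for i in states.indices {
      switch (states[i], other.states[i]) {
      case let (a, b) where a == b: break
      case (.unknown, let b):       states[i] = b
      case (_, .unknown):           break
      default:                      states[i] = .partial
      }
    }
  }
}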


Swift SVN r10624
2013-11-21 05:05:40 +00:00
Adrian Prantl
5c812906ca Revert "Revert r10613, the debug info problems are significant."
This reverts commit 41a6515ba08219c284a408448d790a783e64dad1.

Swift SVN r10616
2013-11-21 00:50:51 +00:00
Chris Lattner
c5034f8504 Revert r10613, the debug info problems are significant.
Swift SVN r10614
2013-11-21 00:12:34 +00:00
Chris Lattner
41e3d0fc23 Re-commit Stephen's patch: Enable capture promotion pass by default
This fixes up various testcases.  Note that 3 debug info tests are asserting
in IRGen for a common but unknown-to-me reason; I've XFAILed them for now.


Swift SVN r10613
2013-11-21 00:09:35 +00:00
Chris Lattner
07ce5306f3 add a SILType::isTrivial helper function.
Swift SVN r10610
2013-11-20 22:53:49 +00:00
Chris Lattner
c3907b3867 Teach DI to remove destroy_addrs on paths where the memory is always uninitialized.
Doing this exposed that the dataflow analysis in DI was pretty fundamentally broken:
it was trying to compute blocks where the (tuple elements of the) memory were either
initialized, uninitialized, or partially initialized (initialized only on some paths),
but it conflated the partial state with its unknown state, causing it to get the wrong
results a lot of the time.  Rewrite things so that it is correct.



Swift SVN r10594
2013-11-20 16:50:45 +00:00
John McCall
20e58dcf93 Change the type of function values in SIL to SILFunctionType.
Perform major abstraction remappings in SILGen.  Introduce
thunking functions as necessary to map between abstraction
patterns.

Swift SVN r10562
2013-11-19 22:55:09 +00:00
Chris Lattner
614ab66d6b Fix a bug Joe found in DI: when exploding a copyaddr that is
loading from our current allocation, we'd sometimes add loads
corresponding to the "assign" to the use list for this memory
object.

No testcase as this will be tested by Joe's forthcoming patch
to produce more copyaddrs from silgen.


Swift SVN r10559
2013-11-19 19:52:05 +00:00
Chris Lattner
5e94be5073 Fix rdar://15492647 - destroy_addr dropped by DI, causing leaks
Fix DI to not delete non-trivial memory allocations that are still 
used by destroy_addr or strong_release, since just dropping those operations
will cause a memory leak.

This is simple, but prevents DI from removing most allocations.  To
avoid having to update all the testcases :), teach it also to promote
non-address-only destroy_addrs into a load+release sequence when the
load's value is available, as part of normal load promotion.



Swift SVN r10542
2013-11-18 17:35:56 +00:00
Chris Lattner
ae2bdb8d6a Enhance DI to know that trivial types don't have complicated cleanup semantics,
so they don't need code motion on releases.  This fixes Jordan's testcase from
rdar://15209088, though non-trivial types still don't work.


Swift SVN r10539
2013-11-18 15:47:34 +00:00
Chris Lattner
bb6df84e1e now that we have a more disciplined approach to copyaddr explosions,
we don't need logic to compute conditional access paths.  Simplify the code.


Swift SVN r10538
2013-11-18 15:34:23 +00:00
Chris Lattner
feee9f6332 pull release processing into the main DI flow, no functionality change.
Swift SVN r10537
2013-11-18 15:28:48 +00:00
Chris Lattner
ec0e1e6feb Rewrite most of DI to work on the entire memory object at once,
instead of working one tuple element at a time.  This requires it
to use bitvectors to compute each tuple element's liveness separately.

A big part of this is introducing the new MemoryUse type, which wraps
up the tuple elements accessed by a user of the memory object in addition
to its kind, and extending the NonLoadUses set to keep an index into the
Uses map, and a lot of other things.

This wraps up:
<rdar://problem/15228619> Eliminate the notion of Element from DefiniteInitialization
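
A hedged Swift sketch of what a MemoryUse record carries (the real type is
C++, and the field and case names here are guesses based on this
description):

// One record per use of the memory object: which instruction it is,
// what kind of access, and which contiguous tuple elements it touches.
struct MemoryUse {
  enum Kind {
    case load, store, inOutUse, escape     // illustrative subset of use kinds
  }
  var instruction: String                  // stand-in for a SILInstruction reference
  var kind: Kind
  var firstElement: Int                    // first tuple element touched
  var elementCount: Int                    // number of elements touched
}

// With element ranges attached to every use, the dataflow can keep one
// bitvector of per-element liveness instead of re-running per element.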



Swift SVN r10520
2013-11-17 05:14:52 +00:00
Chris Lattner
2775de4dc4 releases apply to the entire memory object, not to tuple elements within it,
so stop tracking them on a per-element basis.  They also don't make sense for
global variables, so verify they don't occur there.


Swift SVN r10501
2013-11-15 22:23:06 +00:00
Chris Lattner
a3ea22349a split processing of "release" instructions out to a separate list,
since they are handled in a completely different way from other uses
and already had to be std::partitioned to the end anyway.  They will
only become more different over time.


Swift SVN r10500
2013-11-15 21:55:38 +00:00
Chris Lattner
4962fc6c9b merge more and harder.
Swift SVN r10499
2013-11-15 21:19:42 +00:00
Chris Lattner
19355152d0 merge the alloc_stack and alloc_box processing logic. NFC.
Swift SVN r10498
2013-11-15 21:12:30 +00:00
Chris Lattner
27ff69ebb3 Bitpack LiveOutBlockState to fit into a single byte. Add some
tests for empty structs (which already pass).  No functionality
change.


Swift SVN r10497
2013-11-15 20:59:00 +00:00
John McCall
2ed33e4ffa Make convenient accessors for getting a lowered SILType for
a struct/class field or a tuple element.

Make DefiniteInitialization traffic in SILTypes more.

Swift SVN r10055
2013-11-08 22:04:04 +00:00
Chris Lattner
153447496f fix rdar://15379013 - Definite initialization inappropriately changes existential initialization to assignment
We were incorrectly treating init_existential as equivalent to a struct
access, whereas for DI purposes it is much more like an enum access: a store
to the result of a (not further indexed) init_existential pointer should be
treated as a full store, not a partial store.
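
In Swift terms, the distinction looks like this (a sketch; P and S are
placeholder types, not from the commit):

protocol P {}
struct S: P { var a = 0 }

func f() {
  var s = S()
  s.a = 1         // a store to a struct field: a *partial* store of s

  var x: P
  x = S()         // a store through a fresh init_existential reinitializes
  x = S()         // all of x, so DI must treat it as a full store (like an
                  // enum), not as an assignment into part of an existing value
  _ = (s, x)
}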



Swift SVN r9933
2013-11-04 22:38:26 +00:00
Chris Lattner
a0c5afa7ff re-commit r9816, allowing DI to delete allocations *in* transparent
functions.  Before, it would only delete them once they got inlined,
which is a waste of compile time and serves no purpose.

This exposed a bug in mandatory inlining, which is now fixed.  It
shrinks the stdlib by 4500 lines, almost 10%.



Swift SVN r9906
2013-11-03 18:07:39 +00:00
Chris Lattner
a2a1b2af55 Clean up tuple and extract SILBuilder stuff:
- Introduce emitTupleExtract / emitStructExtract, which fold when their operand is a tuple/struct.
- Rename SILBuilder::createTupleExtractInst -> createTupleExtract, "Inst" isn't used as a suffix.
- Switch capture promotion and DI to use the new functions.

This trims 300 lines out of the stdlib.


Swift SVN r9897
2013-11-03 04:47:40 +00:00
Dmitri Hrybenko
c2425d5afd Revert r9816. It causes three tests to enter an inf loop:
Interpreter/archetype_casts.swift
Interpreter/classes.swift
Interpreter/generic_casts.swift


Swift SVN r9829
2013-10-31 00:01:56 +00:00
Chris Lattner
a667e61fc3 Make the "delete store-only stack allocations" logic delete all allocations
in transparent functions.  They will be deleted anyway when they get inlined
into callers, so there is no reason to do the work to carry them around and
inline them, only to delete them.

This shrinks the sil for the stdlib by about 5%: from 71672 lines to 67718 lines.



Swift SVN r9818
2013-10-30 22:07:44 +00:00
Chris Lattner
2d0bc50c74 teach definite initialization to handle dealloc_addr instructions, instead of treating
them as escape points.


Swift SVN r9725
2013-10-28 20:38:31 +00:00
Chris Lattner
1aac046e71 reimplement alloc_stack/box deletion in DI instead of being
part of allocbox_to_stack.  This is simpler and more efficient
given the info DI already has, and this enables future progress.


Swift SVN r9707
2013-10-28 16:26:04 +00:00
Chris Lattner
cbeb113a4f Fix DI to remove assign instructions from its Uses list when it
lowers them (otherwise the pointer would dangle).  This doesn't
currently manifest as a problem, but will with some changes that
are coming.


Swift SVN r9706
2013-10-28 15:44:04 +00:00
Chris Lattner
c118613de3 Fix SILValue use_iterators to type the user as a SILInstruction, not just a ValueBase.
There are no values other than instructions that can use other values.  BBArguments are
defs, not uses.  This eliminates a bunch of casts in clients that use getUser().


Swift SVN r9701
2013-10-27 23:32:14 +00:00
Chris Lattner
5f190a8e0b implement cross-block load promotion in definite initialization. This allows
us to diagnose things that span multiple blocks, e.g.:

  var xu8_3 : UInt8 = 240   // Global (cross block) analysis.
  for i in 0..10 {}
  xu8_3 += 40 // expected-error {{arithmetic operation '240 + 40' (on type 'UInt8') results in an overflow}}

This doesn't do full SSA construction in that it won't generate "phi" nodes, but it will promote any load that can be substituted with a (potentially field-sensitive and decomposed) single value.  Given that no diagnostic passes are going to be looking through BB args anyway, this seems like the right level of sophistication for definite init.



Swift SVN r9523
2013-10-20 08:11:53 +00:00
Chris Lattner
be353ab347 simplify some code by getting rid of some bool results,
and computing their result at the top level.  No functionality
change.


Swift SVN r9517
2013-10-19 03:34:03 +00:00
Chris Lattner
823dab7fa8 code cleanups: remove a pointless cl::opt and inline some now really similar functions.
Swift SVN r9347
2013-10-15 06:43:38 +00:00
Chris Lattner
528ab405f8 Teach definite-init to forward stored values to copy_addrs,
exploding them on demand.  This finally resolves rdar://15170149,
which allows a bunch of tests in SILPasses/constant_propagation.swift
to work.



Swift SVN r9346
2013-10-15 06:28:47 +00:00
Chris Lattner
46e3b7b656 Several changes:
- Enhance the driver of definite initialization to scalarize copyaddrs
  that span multiple tuple elements, so that DI only has to think about
  a single element at a time (when it comes to copy_addrs - inout and
  escapes can span anything, of course)
- Teach load elimination to explode copyaddrs into their components when
  doing so would allow eliminating a load.
- Fix a bug where indexing into a tuple inside an enum could cause
  accesses to be attributed to the wrong element, which manifested
  as a crash in the testsuite now that DI is being more aggressive.

Unfortunately, DI is now exploding some copyaddrs that feed inout shadows,
disabling the inout shadow optimization on something important for 
stdlib/String.swift.  This is a regression, but is less important than
unblocking work dependent on rdar://15170149, so I'm just dialing back
the testcase and tracking this with rdar://15228172.


Swift SVN r9345
2013-10-15 04:15:32 +00:00
Chris Lattner
74645a2472 Step #2 of the load promotion rewrite: change how assigns are handled.
Previously, the definite init pass would check to see if there was a
value available to optimize out the load of a non-trivial assign.  Now
we just unconditionally lower an assign to a load/copy/store sequence,
and let later load elimination zap the load if it is redundant.

This allows us to remove the old mechanics for reasoning about
load elimination, including all the "AccessPath" stuff.


Swift SVN r9340
2013-10-15 00:26:22 +00:00
Chris Lattner
5708f5d28c Take a big step towards rewriting how definite init promotes loads
into SSA form, moving toward resolving rdar://15170149.

Instead of promoting loads in the middle of the process of proving
that everything passes DI requirements, do this after a full element
is OK.

Doing this requires changing just about everything about how we modeled
elements and subelements being promoted, but this is good for a lot of
reasons:
1) We now don't eagerly scalarize all loads to struct members; now we
   just scalarize top-level tuples.  This is good for preserving
   higher-level operations.
2) Even without scalarization, we can still model things at an extremely
   fine grain, but we can also keep aggregate operations together.
   This means that load promotion doesn't insert a kajillion
   struct_extract + struct sequences to decompose and then recompose
   things (see the sketch below).

This is just patch #1 of a series, but this is the bulk of the hard work.
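
For instance, point 2 means a whole-aggregate load can be promoted as one
value (a sketch; Pair is a placeholder type, not from the commit):

struct Pair { var a: Int; var b: Int }

func f() -> Pair {
  var p = Pair(a: 1, b: 2)   // tracked internally at the grain of .a and .b
  p.a = 3                    // a partial store, modeled per subelement
  return p                   // the load of p promotes as one aggregate value:
                             // no struct_extract + struct recomposition needed
}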



Swift SVN r9338
2013-10-15 00:03:57 +00:00