@@ -308,7 +308,7 @@ jl_method_t *jl_mk_builtin_func(jl_datatype_t *dt, jl_sym_t *sname, jl_fptr_args
     m->isva = 1;
     m->nargs = 2;
     jl_atomic_store_relaxed(&m->primary_world, 1);
-    jl_atomic_store_relaxed(&m->dispatch_status, METHOD_SIG_LATEST_ONLY | METHOD_SIG_LATEST_ONLY);
+    jl_atomic_store_relaxed(&m->dispatch_status, METHOD_SIG_LATEST_ONLY | METHOD_SIG_LATEST_WHICH);
     m->sig = (jl_value_t*)jl_anytuple_type;
     m->slot_syms = jl_an_empty_string;
     m->nospecialize = 0;
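
Note on the hunk above: the old right-hand side OR'ed METHOD_SIG_LATEST_ONLY with itself, which is a no-op, so builtin methods never actually had METHOD_SIG_LATEST_WHICH set. A minimal self-contained illustration (the flag values below are hypothetical stand-ins, not the definitions from julia.h):

#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-ins for the real flag values; only the fact
 * that they are distinct bits matters for this illustration. */
enum {
    METHOD_SIG_LATEST_WHICH = 1 << 0,
    METHOD_SIG_LATEST_ONLY  = 1 << 1,
};

int main(void)
{
    /* Old code: OR'ing a bit with itself leaves WHICH unset. */
    int old_status = METHOD_SIG_LATEST_ONLY | METHOD_SIG_LATEST_ONLY;
    /* Fixed code: both bits are set. */
    int new_status = METHOD_SIG_LATEST_ONLY | METHOD_SIG_LATEST_WHICH;
    assert(!(old_status & METHOD_SIG_LATEST_WHICH));
    assert(new_status & METHOD_SIG_LATEST_WHICH);
    printf("old=0x%x new=0x%x\n", old_status, new_status);
    return 0;
}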
@@ -1505,22 +1505,26 @@ jl_method_instance_t *cache_method(
         size_t world, size_t min_valid, size_t max_valid,
         jl_svec_t *sparams)
 {
-    // caller must hold the parent->writelock
+    // caller must hold the parent->writelock, which this releases
     // short-circuit (now that we hold the lock) if this entry is already present
     int8_t offs = mc ? jl_cachearg_offset() : 1;
     { // scope block
         if (mc) {
             jl_genericmemory_t *leafcache = jl_atomic_load_relaxed(&mc->leafcache);
             jl_typemap_entry_t *entry = lookup_leafcache(leafcache, (jl_value_t*)tt, world);
-            if (entry)
+            if (entry) {
+                if (mc) JL_UNLOCK(&mc->writelock);
                 return entry->func.linfo;
+            }
         }
         struct jl_typemap_assoc search = {(jl_value_t*)tt, world, NULL};
         jl_typemap_t *cacheentry = jl_atomic_load_relaxed(cache);
         assert(cacheentry != NULL);
         jl_typemap_entry_t *entry = jl_typemap_assoc_by_type(cacheentry, &search, offs, /*subtype*/1);
-        if (entry && entry->func.value)
+        if (entry && entry->func.value) {
+            if (mc) JL_UNLOCK(&mc->writelock);
             return entry->func.linfo;
+        }
     }
 
     jl_method_instance_t *newmeth = NULL;
@@ -1533,6 +1537,7 @@ jl_method_instance_t *cache_method(
             JL_GC_PUSH1(&newentry);
             jl_typemap_insert(cache, parent, newentry, offs);
             JL_GC_POP();
+            if (mc) JL_UNLOCK(&mc->writelock);
             return newmeth;
         }
     }
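
These two hunks change the locking contract of cache_method: the caller still acquires parent->writelock, but cache_method now releases it on every return path, both the early-return cache hits and the insertion paths (the later hunks in this diff delete the matching JL_UNLOCK calls from the call sites). A self-contained sketch of that contract, with a plain pthread mutex standing in for the Julia lock:

#include <pthread.h>
#include <stdio.h>

/* Sketch of the new contract: the caller locks, the callee unlocks
 * on every return path. A pthread mutex stands in for mc->writelock. */
static pthread_mutex_t writelock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for cache_method: must release writelock before returning. */
static int cache_method_sketch(int already_cached)
{
    if (already_cached) {                 /* short-circuit path */
        pthread_mutex_unlock(&writelock);
        return 1;
    }
    /* ... insert the new entry into the cache ... */
    pthread_mutex_unlock(&writelock);     /* insertion path */
    return 0;
}

int main(void)
{
    pthread_mutex_lock(&writelock);       /* caller takes the lock ... */
    int hit = cache_method_sketch(0);     /* ... callee releases it */
    printf("cache hit: %d\n", hit);       /* no unlock here anymore */
    return 0;
}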
@@ -1577,12 +1582,20 @@ jl_method_instance_t *cache_method(
     if (newmeth->cache_with_orig)
         cache_with_orig = 1;
 
+    // Capture world counter at start to detect races
+    size_t current_world = mc ? jl_atomic_load_acquire(&jl_world_counter) : ~(size_t)0;
+    int unconstrained_max = max_valid > current_world;
+    if (unconstrained_max) {
+        assert(max_valid == ~(size_t)0);
+        max_valid = current_world;
+    }
+
     jl_tupletype_t *cachett = tt;
-    jl_svec_t* guardsigs = jl_emptysvec;
+    jl_svec_t *guardsigs = jl_emptysvec;
     if (!cache_with_orig && mt) {
         // now examine what will happen if we chose to use this sig in the cache
         size_t min_valid2 = 1;
-        size_t max_valid2 = ~(size_t)0;
+        size_t max_valid2 = current_world;
         temp = ml_matches(mt, mc, compilationsig, MAX_UNSPECIALIZED_CONFLICTS, 1, 1, world, 0, &min_valid2, &max_valid2, NULL);
         int guards = 0;
         if (temp == jl_nothing) {
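
The new block above snapshots jl_world_counter before the entry is published and clamps an unbounded max_valid down to that snapshot; the entry is only re-widened to ~(size_t)0 later, under world_counter_lock, if the counter has not moved in the meantime (see the next hunk). A self-contained sketch of the clamp step, with a C11 atomic standing in for jl_world_counter:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for jl_world_counter. */
static atomic_size_t world_counter = 5;

int main(void)
{
    size_t max_valid = ~(size_t)0;      /* "valid forever" request */
    /* Capture the world counter at the start to detect races. */
    size_t current_world = atomic_load_explicit(&world_counter,
                                                memory_order_acquire);
    int unconstrained_max = max_valid > current_world;
    if (unconstrained_max)
        max_valid = current_world;      /* clamp to the observed world */
    /* ... publish the cache entry without holding the world lock ... */
    printf("entry valid through world %zu%s\n", max_valid,
           unconstrained_max ? " (pending promotion)" : "");
    return 0;
}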
@@ -1726,12 +1739,91 @@ jl_method_instance_t *cache_method(
             }
         }
     }
+    if (mc) {
+        JL_UNLOCK(&mc->writelock);
+
+        // Only set METHOD_SIG_LATEST_ONLY on method instance if method does NOT have the bit, no guards required, and min_valid == primary_world
+        int should_set_dispatch_status = !(jl_atomic_load_relaxed(&definition->dispatch_status) & METHOD_SIG_LATEST_ONLY) &&
+                (!cache_with_orig && jl_svec_len(guardsigs) == 0) &&
+                min_valid == jl_atomic_load_relaxed(&definition->primary_world) &&
+                !(jl_atomic_load_relaxed(&newmeth->dispatch_status) & METHOD_SIG_LATEST_ONLY);
+
+        // Combined trylock for both dispatch_status setting and max_world restoration
+        if ((should_set_dispatch_status || unconstrained_max) &&
+                jl_atomic_load_relaxed(&jl_world_counter) == current_world) {
+            JL_LOCK(&world_counter_lock);
+            if (jl_atomic_load_relaxed(&jl_world_counter) == current_world) {
+                if (should_set_dispatch_status) {
+                    jl_atomic_store_relaxed(&newmeth->dispatch_status, METHOD_SIG_LATEST_ONLY);
+                }
+                if (unconstrained_max) {
+                    jl_atomic_store_relaxed(&newentry->max_world, ~(size_t)0);
+                }
+            }
+            JL_UNLOCK(&world_counter_lock);
+        }
+    }
 
     JL_GC_POP();
     return newmeth;
 }
 
-static jl_method_match_t *_gf_invoke_lookup(jl_value_t *types JL_PROPAGATES_ROOT, jl_methtable_t *mt, size_t world, size_t *min_valid, size_t *max_valid);
+static void _jl_promote_ci_to_current(jl_code_instance_t *ci, size_t validated_world) JL_NOTSAFEPOINT
+{
+    if (jl_atomic_load_relaxed(&ci->max_world) != validated_world)
+        return;
+    jl_atomic_store_relaxed(&ci->max_world, ~(size_t)0);
+    jl_svec_t *edges = jl_atomic_load_relaxed(&ci->edges);
+    for (size_t i = 0; i < jl_svec_len(edges); i++) {
+        jl_value_t *edge = jl_svecref(edges, i);
+        if (!jl_is_code_instance(edge))
+            continue;
+        _jl_promote_ci_to_current((jl_code_instance_t*)edge, validated_world);
+    }
+}
+
+JL_DLLEXPORT void jl_promote_cis_to_current(jl_code_instance_t **cis, size_t n, size_t validated_world)
+{
+    size_t current_world = jl_atomic_load_relaxed(&jl_world_counter);
+    // No need to acquire the lock if we've been invalidated anyway
+    if (current_world > validated_world)
+        return;
+    JL_LOCK(&world_counter_lock);
+    current_world = jl_atomic_load_relaxed(&jl_world_counter);
+    if (current_world == validated_world) {
+        for (size_t i = 0; i < n; i++) {
+            _jl_promote_ci_to_current(cis[i], validated_world);
+        }
+    }
+    JL_UNLOCK(&world_counter_lock);
+}
+
+JL_DLLEXPORT void jl_promote_ci_to_current(jl_code_instance_t *ci, size_t validated_world)
+{
+    jl_promote_cis_to_current(&ci, 1, validated_world);
+}
+
+JL_DLLEXPORT void jl_promote_mi_to_current(jl_method_instance_t *mi, size_t min_world, size_t validated_world)
+{
+    size_t current_world = jl_atomic_load_relaxed(&jl_world_counter);
+    // No need to acquire the lock if we've been invalidated anyway
+    if (current_world > validated_world)
+        return;
+    // Only set METHOD_SIG_LATEST_ONLY on method instance if method does NOT have the bit and min_valid == primary_world
+    jl_method_t *definition = mi->def.method;
+    if ((jl_atomic_load_relaxed(&definition->dispatch_status) & METHOD_SIG_LATEST_ONLY) ||
+            min_world != jl_atomic_load_relaxed(&definition->primary_world) ||
+            (jl_atomic_load_relaxed(&mi->dispatch_status) & METHOD_SIG_LATEST_ONLY))
+        return;
+    JL_LOCK(&world_counter_lock);
+    current_world = jl_atomic_load_relaxed(&jl_world_counter);
+    if (current_world == validated_world) {
+        jl_atomic_store_relaxed(&mi->dispatch_status, METHOD_SIG_LATEST_ONLY);
+    }
+    JL_UNLOCK(&world_counter_lock);
+}
+
+static jl_method_match_t *_gf_invoke_lookup(jl_value_t *types JL_PROPAGATES_ROOT, jl_methtable_t *mt, size_t world, int cache, size_t *min_valid, size_t *max_valid);
 
 JL_DLLEXPORT jl_typemap_entry_t *jl_mt_find_cache_entry(jl_methcache_t *mc JL_PROPAGATES_ROOT, jl_datatype_t *tt JL_MAYBE_UNROOTED JL_ROOTS_TEMPORARILY, size_t world)
 { // exported only for debugging purposes, not for casual use
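
How the new promotion entry points are meant to be driven (a hedged sketch, not code from this patch): a client that finished validating a code instance at some world asks for promotion to max_world = ~(size_t)0, which jl_promote_cis_to_current grants only if jl_world_counter still equals validated_world while world_counter_lock is held; _jl_promote_ci_to_current then re-widens the instance and, recursively, any code-instance edges stamped with the same validated_world. In the snippet, compile_at_world is a hypothetical helper and the Julia internal headers are assumed to be in scope:

/* Hedged usage sketch -- assumes julia.h/julia_internal.h;
 * compile_at_world() is a hypothetical stand-in for whatever
 * produced and validated the code instance. */
size_t validated_world = jl_atomic_load_acquire(&jl_world_counter);
jl_code_instance_t *ci = compile_at_world(mi, validated_world); /* hypothetical */
if (ci != NULL) {
    /* No-op if a newer world already exists; otherwise ci (and any
     * code-instance edges whose max_world == validated_world) become
     * valid forever. */
    jl_promote_ci_to_current(ci, validated_world);
}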
@@ -1765,11 +1857,13 @@ static jl_method_instance_t *jl_mt_assoc_by_type(jl_methcache_t *mc JL_PROPAGATE
     if (!mi) {
         size_t min_valid = 0;
         size_t max_valid = ~(size_t)0;
-        matc = _gf_invoke_lookup((jl_value_t*)tt, jl_method_table, world, &min_valid, &max_valid);
+        matc = _gf_invoke_lookup((jl_value_t*)tt, jl_method_table, world, 0, &min_valid, &max_valid);
         if (matc) {
             jl_method_t *m = matc->method;
             jl_svec_t *env = matc->sparams;
             mi = cache_method(jl_method_table, mc, &mc->cache, (jl_value_t*)mc, tt, m, world, min_valid, max_valid, env);
+            JL_GC_POP();
+            return mi;
         }
     }
     JL_UNLOCK(&mc->writelock);
@@ -2126,6 +2220,7 @@ static int _invalidate_dispatch_backedges(jl_method_instance_t *mi, jl_value_t *
 // invalidate cached methods that overlap this definition
 static void invalidate_backedges(jl_method_instance_t *replaced_mi, size_t max_world, const char *why)
 {
+    // Reset dispatch_status when method instance is replaced
     JL_LOCK(&replaced_mi->def.method->writelock);
     _invalidate_backedges(replaced_mi, NULL, max_world, 1);
     JL_UNLOCK(&replaced_mi->def.method->writelock);
@@ -2136,6 +2231,7 @@ static void invalidate_backedges(jl_method_instance_t *replaced_mi, size_t max_w
         jl_array_ptr_1d_push(_jl_debug_method_invalidation, loctag);
         JL_GC_POP();
     }
+    jl_atomic_store_relaxed(&replaced_mi->dispatch_status, 0);
 }
 
 // add a backedge from callee to caller
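
The stores added here and in jl_method_table_activate below keep one invariant: METHOD_SIG_LATEST_ONLY on a method instance may only be trusted while that instance is still the latest-world dispatch target, so the bit is cleared the moment its dispatch is replaced. A self-contained sketch of that life cycle with a C11 atomic (the flag value is a hypothetical stand-in):

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical flag value; only "some nonzero bit" matters here. */
enum { METHOD_SIG_LATEST_ONLY = 1 << 1 };

static atomic_int dispatch_status;

/* Set while the instance is the unique latest-world dispatch target
 * (the cache_method / jl_promote_mi_to_current paths). */
static void on_promoted(void)
{
    atomic_store_explicit(&dispatch_status, METHOD_SIG_LATEST_ONLY,
                          memory_order_relaxed);
}

/* Cleared as soon as the instance's dispatch is replaced
 * (the invalidate_backedges / jl_method_table_activate paths). */
static void on_replaced(void)
{
    atomic_store_explicit(&dispatch_status, 0, memory_order_relaxed);
}

int main(void)
{
    on_promoted();
    on_replaced();
    printf("latest-only bit after replacement: %d\n",
           atomic_load_explicit(&dispatch_status, memory_order_relaxed));
    return 0;
}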
@@ -2633,6 +2729,8 @@ void jl_method_table_activate(jl_typemap_entry_t *newentry)
                 // call invalidate_backedges(mi, max_world, "jl_method_table_insert");
                 // but ignore invoke-type edges
                 int invalidatedmi = _invalidate_dispatch_backedges(mi, type, m, d, n, replaced_dispatch, ambig, max_world, morespec);
+                if (replaced_dispatch)
+                    jl_atomic_store_relaxed(&mi->dispatch_status, 0);
                 jl_array_ptr_1d_push(oldmi, (jl_value_t*)mi);
                 if (_jl_debug_method_invalidation && invalidatedmi) {
                     jl_array_ptr_1d_push(_jl_debug_method_invalidation, (jl_value_t*)mi);
@@ -3404,7 +3502,6 @@ JL_DLLEXPORT jl_method_instance_t *jl_method_match_to_mi(jl_method_match_t *matc
         assert(mc);
         JL_LOCK(&mc->writelock);
         mi = cache_method(jl_method_get_table(m), mc, &mc->cache, (jl_value_t*)mc, ti, m, world, min_valid, max_valid, env);
-        JL_UNLOCK(&mc->writelock);
     }
     else {
         jl_value_t *tt = jl_normalize_to_compilable_sig(ti, env, m, 1);
@@ -3892,15 +3989,15 @@ JL_DLLEXPORT jl_value_t *jl_apply_generic(jl_value_t *F, jl_value_t **args, uint
     return _jl_invoke(F, args, nargs, mfunc, world);
 }
 
-static jl_method_match_t *_gf_invoke_lookup(jl_value_t *types JL_PROPAGATES_ROOT, jl_methtable_t *mt, size_t world, size_t *min_valid, size_t *max_valid)
+static jl_method_match_t *_gf_invoke_lookup(jl_value_t *types JL_PROPAGATES_ROOT, jl_methtable_t *mt, size_t world, int cache_result, size_t *min_valid, size_t *max_valid)
 {
     jl_value_t *unw = jl_unwrap_unionall((jl_value_t*)types);
     if (!jl_is_tuple_type(unw))
         return NULL;
     if (jl_tparam0(unw) == jl_bottom_type)
         return NULL;
     jl_methcache_t *mc = ((jl_methtable_t*)mt)->cache;
-    jl_value_t *matches = ml_matches((jl_methtable_t*)mt, mc, (jl_tupletype_t*)types, 1, 0, 0, world, 1, min_valid, max_valid, NULL);
+    jl_value_t *matches = ml_matches((jl_methtable_t*)mt, mc, (jl_tupletype_t*)types, 1, 0, 0, world, cache_result, min_valid, max_valid, NULL);
     if (matches == jl_nothing || jl_array_nrows(matches) != 1)
         return NULL;
     jl_method_match_t *matc = (jl_method_match_t*)jl_array_ptr_ref(matches, 0);
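
Why the new parameter exists: jl_mt_assoc_by_type (earlier in this diff) now passes 0 so the match is not cached twice, since it inserts the real entry itself via cache_method, while the exported jl_gf_invoke_lookup / jl_gf_invoke_lookup_worlds below keep the old behavior by passing 1, which _gf_invoke_lookup forwards to ml_matches as its cache_result argument. A self-contained sketch of the pattern, with illustrative stand-in names rather than Julia's:

#include <stdio.h>

static int cache_slot = -1;    /* stand-in for the method cache */

/* Stand-in lookup that optionally publishes its result to the cache,
 * mirroring how _gf_invoke_lookup forwards cache_result to ml_matches. */
static int lookup(int key, int cache_result)
{
    int match = key * 2;       /* pretend method search */
    if (cache_result)
        cache_slot = match;    /* publish, as ml_matches(..., 1, ...) would */
    return match;
}

int main(void)
{
    lookup(21, 0);             /* internal path: caller caches separately */
    printf("cached after cache=0: %d\n", cache_slot);   /* still -1 */
    lookup(21, 1);             /* exported path: cache inside the lookup */
    printf("cached after cache=1: %d\n", cache_slot);   /* 42 */
    return 0;
}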
@@ -3914,7 +4011,7 @@ JL_DLLEXPORT jl_value_t *jl_gf_invoke_lookup(jl_value_t *types, jl_value_t *mt,
     size_t max_valid = ~(size_t)0;
     if (mt == jl_nothing)
         mt = (jl_value_t*)jl_method_table;
-    jl_method_match_t *matc = _gf_invoke_lookup(types, (jl_methtable_t*)mt, world, &min_valid, &max_valid);
+    jl_method_match_t *matc = _gf_invoke_lookup(types, (jl_methtable_t*)mt, world, 1, &min_valid, &max_valid);
     if (matc == NULL)
         return jl_nothing;
     return (jl_value_t*)matc->method;
@@ -3925,7 +4022,7 @@ JL_DLLEXPORT jl_value_t *jl_gf_invoke_lookup_worlds(jl_value_t *types, jl_value_
 {
     if (mt == jl_nothing)
         mt = (jl_value_t*)jl_method_table;
-    jl_method_match_t *matc = _gf_invoke_lookup(types, (jl_methtable_t*)mt, world, min_world, max_world);
+    jl_method_match_t *matc = _gf_invoke_lookup(types, (jl_methtable_t*)mt, world, 1, min_world, max_world);
     if (matc == NULL)
         return jl_nothing;
     return (jl_value_t*)matc;
@@ -3987,7 +4084,6 @@ jl_value_t *jl_gf_invoke_by_method(jl_method_t *method, jl_value_t *gf, jl_value
             int sub = jl_subtype_matching((jl_value_t*)tt, (jl_value_t*)method->sig, &tpenv);
             assert(sub); (void)sub;
         }
-
         mfunc = cache_method(NULL, NULL, &method->invokes, (jl_value_t*)method, tt, method, 1, 1, ~(size_t)0, tpenv);
     }
     JL_UNLOCK(&method->writelock);
@@ -4486,26 +4582,28 @@ static jl_value_t *ml_matches(jl_methtable_t *mt, jl_methcache_t *mc,
             if (entry && (((jl_datatype_t*)unw)->isdispatchtuple || entry->guardsigs == jl_emptysvec)) {
                 jl_method_instance_t *mi = entry->func.linfo;
                 jl_method_t *meth = mi->def.method;
-                if (!jl_is_unionall(meth->sig) && ((jl_datatype_t*)unw)->isdispatchtuple) {
-                    env.match.env = jl_emptysvec;
-                    env.match.ti = unw;
-                }
-                else {
-                    // this just calls jl_subtype_env (since we know that `type <: meth->sig` by transitivity)
-                    env.match.ti = jl_type_intersection_env((jl_value_t*)type, (jl_value_t*)meth->sig, &env.match.env);
-                }
-                env.matc = make_method_match((jl_tupletype_t*)env.match.ti,
-                    env.match.env, meth, FULLY_COVERS);
-                env.t = (jl_value_t*)jl_alloc_vec_any(1);
-                jl_array_ptr_set(env.t, 0, env.matc);
                 size_t min_world = jl_atomic_load_relaxed(&entry->min_world);
-                size_t max_world = jl_atomic_load_relaxed(&entry->max_world);
-                if (*min_valid < min_world)
-                    *min_valid = min_world;
-                if (*max_valid > max_world)
-                    *max_valid = max_world;
-                JL_GC_POP();
-                return env.t;
+                if (min_world == meth->primary_world) {
+                    size_t max_world = jl_atomic_load_relaxed(&entry->max_world);
+                    if (!jl_is_unionall(meth->sig) && ((jl_datatype_t*)unw)->isdispatchtuple) {
+                        env.match.env = jl_emptysvec;
+                        env.match.ti = unw;
+                    }
+                    else {
+                        // this just calls jl_subtype_env (since we know that `type <: meth->sig` by transitivity)
+                        env.match.ti = jl_type_intersection_env((jl_value_t*)type, (jl_value_t*)meth->sig, &env.match.env);
+                    }
+                    env.matc = make_method_match((jl_tupletype_t*)env.match.ti,
+                        env.match.env, meth, FULLY_COVERS);
+                    env.t = (jl_value_t*)jl_alloc_vec_any(1);
+                    jl_array_ptr_set(env.t, 0, env.matc);
+                    if (*min_valid < min_world)
+                        *min_valid = min_world;
+                    if (*max_valid > max_world)
+                        *max_valid = max_world;
+                    JL_GC_POP();
+                    return env.t;
+                }
             }
         }
     }
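
The restructured fast path above only short-circuits when the cache entry's min_world matches the method's primary_world; in that case the caller's validity window is simply intersected with the entry's [min_world, max_world]. Otherwise ml_matches falls through to the full scan so the bounds get recomputed exactly. A self-contained sketch of the guarded intersection:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    size_t primary_world = 3;                      /* method defined here */
    size_t min_world = 3, max_world = ~(size_t)0;  /* cache entry window */
    size_t min_valid = 1, max_valid = ~(size_t)0;  /* caller's window */
    if (min_world == primary_world) {
        /* Fast path: intersect the caller's window with the entry's. */
        if (min_valid < min_world) min_valid = min_world;
        if (max_valid > max_world) max_valid = max_world;
        printf("fast path: valid for worlds [%zu, %zu]\n",
               min_valid, max_valid);
    }
    else {
        /* Entry describes a narrower window than the method itself. */
        puts("fall through to the full ml_matches scan");
    }
    return 0;
}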
@@ -4770,7 +4868,6 @@ static jl_value_t *ml_matches(jl_methtable_t *mt, jl_methcache_t *mc,
             jl_svec_t *tpenv = env.matc->sparams;
             JL_LOCK(&mc->writelock);
             cache_method(mt, mc, &mc->cache, (jl_value_t*)mc, (jl_tupletype_t*)unw, meth, world, env.match.min_valid, env.match.max_valid, tpenv);
-            JL_UNLOCK(&mc->writelock);
         }
     }
     *min_valid = env.match.min_valid;