@@ -96,11 +96,7 @@ ensure_shared_on_resize(PyListObject *self)
  * of the new slots at exit is undefined heap trash; it's the caller's
  * responsibility to overwrite them with sane values.
  * The number of allocated elements may grow, shrink, or stay the same.
- * Failure is impossible if newsize <= self.allocated on entry, although
- * that partly relies on an assumption that the system realloc() never
- * fails when passed a number of bytes <= the number of bytes last
- * allocated (the C standard doesn't guarantee this, but it's hard to
- * imagine a realloc implementation where it wouldn't be true).
+ * Failure is impossible if newsize <= self.allocated on entry.
  * Note that self->ob_item may change, and even if newsize is less
  * than ob_size on entry.
  */
@@ -145,6 +141,11 @@ list_resize(PyListObject *self, Py_ssize_t newsize)
 #ifdef Py_GIL_DISABLED
     _PyListArray *array = list_allocate_array(new_allocated);
     if (array == NULL) {
+        if (newsize < allocated) {
+            // Never fail when shrinking allocations
+            Py_SET_SIZE(self, newsize);
+            return 0;
+        }
         PyErr_NoMemory();
         return -1;
     }
@@ -178,6 +179,11 @@ list_resize(PyListObject *self, Py_ssize_t newsize)
         items = NULL;
     }
     if (items == NULL) {
+        if (newsize < allocated) {
+            // Never fail when shrinking allocations
+            Py_SET_SIZE(self, newsize);
+            return 0;
+        }
         PyErr_NoMemory();
         return -1;
     }
@@ -818,8 +824,8 @@ list_repeat_lock_held(PyListObject *a, Py_ssize_t n)
             _Py_RefcntAdd(*src, n);
             *dest++ = *src++;
         }
-        // TODO: _Py_memory_repeat calls are not safe for shared lists in
-        // GIL_DISABLED builds. (See issue #129069)
+        // This list is not yet visible to other threads, so atomic repeat
+        // is not necessary even in Py_GIL_DISABLED builds.
         _Py_memory_repeat((char *)np->ob_item, sizeof(PyObject *)*output_size,
                           sizeof(PyObject *)*input_size);
     }
@@ -882,6 +888,34 @@ list_clear_slot(PyObject *self)
     return 0;
 }
 
+// Pointer-by-pointer memmove for PyObject** arrays that is safe
+// for shared lists in Py_GIL_DISABLED builds.
+static void
+ptr_wise_atomic_memmove(PyListObject *a, PyObject **dest, PyObject **src, Py_ssize_t n)
+{
+#ifndef Py_GIL_DISABLED
+    memmove(dest, src, n * sizeof(PyObject *));
+#else
+    _Py_CRITICAL_SECTION_ASSERT_OBJECT_LOCKED(a);
+    if (_Py_IsOwnedByCurrentThread((PyObject *)a) && !_PyObject_GC_IS_SHARED(a)) {
+        // No other threads can read this list concurrently
+        memmove(dest, src, n * sizeof(PyObject *));
+        return;
+    }
+    if (dest < src) {
+        for (Py_ssize_t i = 0; i != n; i++) {
+            _Py_atomic_store_ptr_release(&dest[i], src[i]);
+        }
+    }
+    else {
+        // copy backwards to avoid overwriting src before it's read
+        for (Py_ssize_t i = n; i != 0; i--) {
+            _Py_atomic_store_ptr_release(&dest[i - 1], src[i - 1]);
+        }
+    }
+#endif
+}
+
 /* a[ilow:ihigh] = v if v != NULL.
  * del a[ilow:ihigh] if v == NULL.
  *
@@ -952,27 +986,17 @@ list_ass_slice_lock_held(PyListObject *a, Py_ssize_t ilow, Py_ssize_t ihigh, PyO
     }
 
     if (d < 0) { /* Delete -d items */
-        Py_ssize_t tail;
-        tail = (Py_SIZE(a) - ihigh) * sizeof(PyObject *);
-        // TODO: these memmove/memcpy calls are not safe for shared lists in
-        // GIL_DISABLED builds. (See issue #129069)
-        memmove(&item[ihigh+d], &item[ihigh], tail);
-        if (list_resize(a, Py_SIZE(a) + d) < 0) {
-            memmove(&item[ihigh], &item[ihigh+d], tail);
-            memcpy(&item[ilow], recycle, s);
-            goto Error;
-        }
+        Py_ssize_t tail = Py_SIZE(a) - ihigh;
+        ptr_wise_atomic_memmove(a, &item[ihigh+d], &item[ihigh], tail);
+        (void)list_resize(a, Py_SIZE(a) + d);  // NB: shrinking a list can't fail
         item = a->ob_item;
     }
     else if (d > 0) { /* Insert d items */
         k = Py_SIZE(a);
         if (list_resize(a, k+d) < 0)
             goto Error;
         item = a->ob_item;
-        // TODO: these memmove/memcpy calls are not safe for shared lists in
-        // GIL_DISABLED builds. (See issue #129069)
-        memmove(&item[ihigh+d], &item[ihigh],
-                (k - ihigh)*sizeof(PyObject *));
+        ptr_wise_atomic_memmove(a, &item[ihigh+d], &item[ihigh], k - ihigh);
     }
     for (k = 0; k < n; k++, ilow++) {
         PyObject *w = vitem[k];
@@ -1056,10 +1080,17 @@ list_inplace_repeat_lock_held(PyListObject *self, Py_ssize_t n)
     for (Py_ssize_t j = 0; j < input_size; j++) {
         _Py_RefcntAdd(items[j], n - 1);
     }
-    // TODO: _Py_memory_repeat calls are not safe for shared lists in
-    // GIL_DISABLED builds. (See issue #129069)
+#ifndef Py_GIL_DISABLED
     _Py_memory_repeat((char *)items, sizeof(PyObject *)*output_size,
                       sizeof(PyObject *)*input_size);
+#else
+    Py_ssize_t copied = input_size;
+    while (copied < output_size) {
+        Py_ssize_t items_to_copy = Py_MIN(copied, output_size - copied);
+        ptr_wise_atomic_memmove(self, items + copied, items, items_to_copy);
+        copied += items_to_copy;
+    }
+#endif
     return 0;
 }
 
@@ -1532,7 +1563,6 @@ list_pop_impl(PyListObject *self, Py_ssize_t index)
 /*[clinic end generated code: output=6bd69dcb3f17eca8 input=c269141068ae4b8f]*/
 {
     PyObject *v;
-    int status;
 
     if (Py_SIZE(self) == 0) {
         /* Special-case most common failure cause */
@@ -1548,27 +1578,18 @@ list_pop_impl(PyListObject *self, Py_ssize_t index)
 
     PyObject **items = self->ob_item;
     v = items[index];
-    const Py_ssize_t size_after_pop = Py_SIZE(self) - 1;
-    if (size_after_pop == 0) {
+    if (Py_SIZE(self) == 1) {
         Py_INCREF(v);
         list_clear(self);
-        status = 0;
-    }
-    else {
-        if ((size_after_pop - index) > 0) {
-            memmove(&items[index], &items[index+1], (size_after_pop - index) * sizeof(PyObject *));
-        }
-        status = list_resize(self, size_after_pop);
+        return v;
     }
-    if (status >= 0) {
-        return v; // and v now owns the reference the list had
-    }
-    else {
-        // list resize failed, need to restore
-        memmove(&items[index+1], &items[index], (size_after_pop - index)*sizeof(PyObject *));
-        items[index] = v;
-        return NULL;
+    Py_ssize_t size_after_pop = Py_SIZE(self) - 1;
+    if (index < size_after_pop) {
+        ptr_wise_atomic_memmove(self, &items[index], &items[index+1],
+                                size_after_pop - index);
     }
+    list_resize(self, size_after_pop);  // NB: shrinking a list can't fail
+    return v;
 }
 
 /* Reverse a slice of a list in place, from lo up to (exclusive) hi. */