@@ -19,7 +19,7 @@ Triggering reset
This section documents the APIs which "users" of a resettable object should use
to control it. All resettable control functions must be called while holding
-the iothread lock.
+the BQL.
You can apply a reset to an object using ``resettable_assert_reset()``. You need
to call ``resettable_release_reset()`` to release the object from reset. To
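For illustration, a minimal sketch (not from the tree) of driving a reset from
BQL context, assuming the caller does not already hold the lock:

    #include "qemu/osdep.h"
    #include "qemu/main-loop.h"
    #include "hw/resettable.h"

    /* Hold the BQL across the assert/release pair, as required above. */
    static void pulse_cold_reset(Object *obj)
    {
        bql_lock();
        resettable_assert_reset(obj, RESET_TYPE_COLD);
        /* ... the object stays in reset until released ... */
        resettable_release_reset(obj, RESET_TYPE_COLD);
        bql_unlock();
    }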
@@ -159,7 +159,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(PCIQXLDevice, PCI_QXL)
*
* Use with care; by the time this function returns, the returned pointer is
* not protected by RCU anymore. If the caller is not within an RCU critical
- * section and does not hold the iothread lock, it must have other means of
+ * section and does not hold the BQL, it must have other means of
* protecting the pointer, such as a reference to the region that includes
* the incoming ram_addr_t.
*
@@ -92,7 +92,7 @@ RAMBlock *qemu_ram_block_by_name(const char *name);
*
* By the time this function returns, the returned pointer is not protected
* by RCU anymore. If the caller is not within an RCU critical section and
- * does not hold the iothread lock, it must have other means of protecting the
+ * does not hold the BQL, it must have other means of protecting the
* pointer, such as a reference to the memory region that owns the RAMBlock.
*/
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
@@ -1962,7 +1962,7 @@ int memory_region_get_fd(MemoryRegion *mr);
*
* Use with care; by the time this function returns, the returned pointer is
* not protected by RCU anymore. If the caller is not within an RCU critical
- * section and does not hold the iothread lock, it must have other means of
+ * section and does not hold the BQL, it must have other means of
* protecting the pointer, such as a reference to the region that includes
* the incoming ram_addr_t.
*
@@ -1979,7 +1979,7 @@ MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);
*
* Use with care; by the time this function returns, the returned pointer is
* not protected by RCU anymore. If the caller is not within an RCU critical
- * section and does not hold the iothread lock, it must have other means of
+ * section and does not hold the BQL, it must have other means of
* protecting the pointer, such as a reference to the region that includes
* the incoming ram_addr_t.
*
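The same RCU/BQL caveat is attached to several of these lookup helpers. A hedged
sketch of one safe calling pattern (helper name invented), built on the
memory_region_from_host() declaration above:

    #include "qemu/osdep.h"
    #include "qemu/rcu.h"
    #include "exec/memory.h"

    /* Resolve a host pointer to its MemoryRegion without relying on the BQL:
     * do the lookup inside an RCU read-side section and take a reference so
     * the region stays valid afterwards (drop it with memory_region_unref()). */
    static MemoryRegion *acquire_region_for_host(void *host, ram_addr_t *offset)
    {
        MemoryRegion *mr;

        RCU_READ_LOCK_GUARD();
        mr = memory_region_from_host(host, offset);
        if (mr) {
            memory_region_ref(mr);
        }
        return mr;
    }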
@@ -34,7 +34,7 @@ struct RAMBlock {
ram_addr_t max_length;
void (*resized)(const char*, uint64_t length, void *host);
uint32_t flags;
- /* Protected by iothread lock. */
+ /* Protected by the BQL. */
char idstr[256];
/* RCU-enabled, writes protected by the ramlist lock */
QLIST_ENTRY(RAMBlock) next;
@@ -17,7 +17,7 @@
#include "hw/vmstate-if.h"
typedef struct SaveVMHandlers {
- /* This runs inside the iothread lock. */
+ /* This runs inside the BQL. */
SaveStateHandler *save_state;
/*
@@ -30,7 +30,7 @@ typedef struct SaveVMHandlers {
int (*save_live_complete_postcopy)(QEMUFile *f, void *opaque);
int (*save_live_complete_precopy)(QEMUFile *f, void *opaque);
- /* This runs both outside and inside the iothread lock. */
+ /* This runs both outside and inside the BQL. */
bool (*is_active)(void *opaque);
bool (*has_postcopy)(void *opaque);
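The hunks above and below spell out which SaveVMHandlers callbacks run in BQL
context. A hedged sketch (handler names invented, unrelated fields omitted):

    #include "qemu/osdep.h"
    #include "migration/register.h"

    /* May run with or without the BQL held: only touch data that is
     * protected by its own locking. */
    static bool my_is_active(void *opaque)
    {
        return true;
    }

    /* In the migration case this runs outside the BQL, so it must only use
     * migration-thread-local or separately locked data. */
    static int my_save_live_iterate(QEMUFile *f, void *opaque)
    {
        return 1;   /* nothing more to send in this sketch */
    }

    static SaveVMHandlers my_handlers = {
        .is_active         = my_is_active,
        .save_live_iterate = my_save_live_iterate,
    };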
@@ -43,14 +43,14 @@ typedef struct SaveVMHandlers {
*/
bool (*is_active_iterate)(void *opaque);
- /* This runs outside the iothread lock in the migration case, and
+ /* This runs outside the BQL in the migration case, and
* within the lock in the savevm case. The callback had better only
* use data that is local to the migration thread or protected
* by other locks.
*/
int (*save_live_iterate)(QEMUFile *f, void *opaque);
- /* This runs outside the iothread lock! */
+ /* This runs outside the BQL! */
/* Note for save_live_pending:
* must_precopy:
* - must be migrated in precopy or in stopped state
@@ -940,7 +940,7 @@ static inline const char *aarch32_mode_name(uint32_t psr)
*
* Update the CPU_INTERRUPT_VIRQ bit in cs->interrupt_request, following
* a change to either the input VIRQ line from the GIC or the HCR_EL2.VI bit.
- * Must be called with the iothread lock held.
+ * Must be called with the BQL held.
*/
void arm_cpu_update_virq(ARMCPU *cpu);
@@ -949,7 +949,7 @@ void arm_cpu_update_virq(ARMCPU *cpu);
*
* Update the CPU_INTERRUPT_VFIQ bit in cs->interrupt_request, following
* a change to either the input VFIQ line from the GIC or the HCR_EL2.VF bit.
- * Must be called with the iothread lock held.
+ * Must be called with the BQL held.
*/
void arm_cpu_update_vfiq(ARMCPU *cpu);
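As a hedged illustration of this requirement, consider a qemu_irq input handler
in target/arm code (handler name and wiring are hypothetical; the in-tree
handler additionally records the new line level):

    /* qemu_set_irq() callers run with the BQL held, so the documented
     * precondition of arm_cpu_update_virq() is met here. */
    static void my_virq_line_handler(void *opaque, int n, int level)
    {
        ARMCPU *cpu = opaque;

        arm_cpu_update_virq(cpu);
    }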
@@ -1975,7 +1975,7 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
* @size: number of bytes
* @mmu_idx: virtual address context
* @ra: return address into tcg generated code, or 0
- * Context: iothread lock held
+ * Context: BQL held
*
* Load @size bytes from @addr, which is memory-mapped i/o.
* The bytes are concatenated in big-endian order with @ret_be.
@@ -2521,7 +2521,7 @@ static Int128 do_ld16_mmu(CPUState *cpu, vaddr addr,
* @size: number of bytes
* @mmu_idx: virtual address context
* @ra: return address into tcg generated code, or 0
- * Context: iothread lock held
+ * Context: BQL held
*
* Store @size bytes at @addr, which is memory-mapped i/o.
* The bytes to store are extracted in little-endian order from @val_le;
@@ -123,7 +123,7 @@ void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget)
if (cpu->icount_budget == 0) {
/*
- * We're called without the iothread lock, so must take it while
+ * We're called without the BQL, so must take it while
* we're calling timer handlers.
*/
bql_lock();
@@ -58,7 +58,7 @@ bool mpqemu_msg_send(MPQemuMsg *msg, QIOChannel *ioc, Error **errp)
assert(qemu_in_coroutine() || !iothread);
/*
- * Skip unlocking/locking iothread lock when the IOThread is running
+ * Skip unlocking/locking the BQL when the IOThread is running
* in co-routine context. Co-routine context is asserted above
* for IOThread case.
* Also skip lock handling while in a co-routine in the main context.
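The pattern described above can be sketched generically as follows (illustrative
only; the real mpqemu_msg_send() differs in detail):

    #include "qemu/osdep.h"
    #include "qemu/main-loop.h"
    #include "qemu/coroutine.h"

    /* Drop the BQL around a blocking operation, except in coroutine context,
     * where the coroutine yields instead of blocking the thread. */
    static void call_blocking_op(void (*op)(void *), void *opaque)
    {
        bool drop_bql = !qemu_in_coroutine() && bql_locked();

        if (drop_bql) {
            bql_unlock();
        }
        op(opaque);
        if (drop_bql) {
            bql_lock();
        }
    }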
@@ -464,7 +464,7 @@ static void send_bitmap_bits(QEMUFile *f, DBMSaveState *s,
g_free(buf);
}
-/* Called with iothread lock taken. */
+/* Called with the BQL taken. */
static void dirty_bitmap_do_save_cleanup(DBMSaveState *s)
{
SaveBitmapState *dbms;
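The "Called with the BQL taken" annotations in this file only document a
precondition. A hedged sketch of making it checkable at run time (wrapper name
invented, placed alongside the functions above):

    /* Turn the documented contract into a runtime check before delegating
     * to the real cleanup. */
    static void bitmap_save_cleanup_checked(DBMSaveState *s)
    {
        g_assert(bql_locked());
        dirty_bitmap_do_save_cleanup(s);
    }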
@@ -479,7 +479,7 @@ static void dirty_bitmap_do_save_cleanup(DBMSaveState *s)
}
}
-/* Called with iothread lock taken. */
+/* Called with the BQL taken. */
static int add_bitmaps_to_list(DBMSaveState *s, BlockDriverState *bs,
const char *bs_name, GHashTable *alias_map)
{
@@ -598,7 +598,7 @@ static int add_bitmaps_to_list(DBMSaveState *s, BlockDriverState *bs,
return 0;
}
-/* Called with iothread lock taken. */
+/* Called with the BQL taken. */
static int init_dirty_bitmap_migration(DBMSaveState *s)
{
BlockDriverState *bs;
@@ -607,7 +607,7 @@ static int init_dirty_bitmap_migration(DBMSaveState *s)
BlockBackend *blk;
GHashTable *alias_map = NULL;
- /* Runs in the migration thread, but holds the iothread lock */
+ /* Runs in the migration thread, but holds the BQL */
GLOBAL_STATE_CODE();
GRAPH_RDLOCK_GUARD_MAINLOOP();
@@ -742,7 +742,7 @@ static int dirty_bitmap_save_iterate(QEMUFile *f, void *opaque)
return s->bulk_completed;
}
-/* Called with iothread lock taken. */
+/* Called with the BQL taken. */
static int dirty_bitmap_save_complete(QEMUFile *f, void *opaque)
{
@@ -66,7 +66,7 @@ typedef struct BlkMigDevState {
/* Protected by block migration lock. */
int64_t completed_sectors;
- /* During migration this is protected by iothread lock / AioContext.
+ /* During migration this is protected by the BQL / AioContext.
* Allocation and free happen during setup and cleanup respectively.
*/
BdrvDirtyBitmap *dirty_bitmap;
@@ -101,7 +101,7 @@ typedef struct BlkMigState {
int prev_progress;
int bulk_completed;
- /* Lock must be taken _inside_ the iothread lock and any AioContexts. */
+ /* Lock must be taken _inside_ the BQL and any AioContexts. */
QemuMutex lock;
} BlkMigState;
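A hedged sketch of the resulting lock order (helper name invented; it assumes
the blk_mig_lock()/blk_mig_unlock() wrappers used elsewhere in this file):

    /* The BQL (and any AioContext) is taken by the caller first; the
     * block-migration lock only ever nests inside it. */
    static void set_completed_sectors(BlkMigDevState *bmds, int64_t sectors)
    {
        /* caller already holds the BQL (outer lock) */
        blk_mig_lock();                     /* inner lock */
        bmds->completed_sectors = sectors;  /* protected by the inner lock */
        blk_mig_unlock();
    }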
@@ -117,7 +117,7 @@ static void blk_mig_unlock(void)
qemu_mutex_unlock(&block_mig_state.lock);
}
-/* Must run outside of the iothread lock during the bulk phase,
+/* Must run outside of the BQL during the bulk phase,
* or the VM will stall.
*/
@@ -334,7 +334,7 @@ static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
return (bmds->cur_sector >= total_sectors);
}
-/* Called with iothread lock taken. */
+/* Called with the BQL taken. */
static int set_dirty_tracking(void)
{
@@ -361,7 +361,7 @@ fail:
return ret;
}
-/* Called with iothread lock taken. */
+/* Called with the BQL taken. */
static void unset_dirty_tracking(void)
{
@@ -512,7 +512,7 @@ static void blk_mig_reset_dirty_cursor(void)
}
}
-/* Called with iothread lock and AioContext taken. */
+/* Called with the BQL and AioContext taken. */
static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
int is_async)
@@ -594,7 +594,7 @@ error:
return ret;
}
-/* Called with iothread lock taken.
+/* Called with the BQL taken.
*
* return value:
* 0: too much data for max_downtime
@@ -658,7 +658,7 @@ static int flush_blks(QEMUFile *f)
return ret;
}
-/* Called with iothread lock taken. */
+/* Called with the BQL taken. */
static int64_t get_remaining_dirty(void)
{
@@ -676,7 +676,7 @@ static int64_t get_remaining_dirty(void)
-/* Called with iothread lock taken. */
+/* Called with the BQL taken. */
static void block_migration_cleanup_bmds(void)
{
BlkMigDevState *bmds;
@@ -706,7 +706,7 @@ static void block_migration_cleanup_bmds(void)
}
}
-/* Called with iothread lock taken. */
+/* Called with the BQL taken. */
static void block_migration_cleanup(void *opaque)
{
BlkMigBlock *blk;
@@ -783,7 +783,7 @@ static int block_save_iterate(QEMUFile *f, void *opaque)
}
ret = 0;
} else {
- /* Always called with iothread lock taken for
+ /* Always called with the BQL taken for
* simplicity, block_save_complete also calls it.
*/
bql_lock();
@@ -811,7 +811,7 @@ static int block_save_iterate(QEMUFile *f, void *opaque)
return (delta_bytes > 0);
}
-/* Called with iothread lock taken. */
+/* Called with the BQL taken. */
static int block_save_complete(QEMUFile *f, void *opaque)
{
@@ -945,7 +945,7 @@ int coroutine_fn colo_incoming_co(void)
qemu_thread_join(&th);
bql_lock();
- /* We hold the global iothread lock, so it is safe here */
+ /* We hold the BQL, so it is safe here */
colo_release_ram_cache();
return 0;
@@ -2567,7 +2567,7 @@ fail:
/**
* migration_maybe_pause: Pause if required to by
- * migrate_pause_before_switchover called with the iothread locked
+ * migrate_pause_before_switchover called with the BQL held
* Returns: 0 on success
*/
static int migration_maybe_pause(MigrationState *s,
@@ -2395,7 +2395,7 @@ static void ram_save_cleanup(void *opaque)
/* We don't use dirty log with background snapshots */
if (!migrate_background_snapshot()) {
- /* caller have hold iothread lock or is in a bh, so there is
+ /* The caller holds the BQL or is in a BH, so there is
* no writing race against the migration bitmap
*/
if (global_dirty_tracking & GLOBAL_DIRTY_MIGRATION) {
@@ -3131,7 +3131,7 @@ out:
*
* Returns zero to indicate success or negative on error
*
- * Called with iothread lock
+ * Called with the BQL held
*
* @f: QEMUFile where to send the data
* @opaque: RAMState pointer
@@ -799,7 +799,7 @@ static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
abort();
found:
- /* It is safe to write mru_block outside the iothread lock. This
+ /* It is safe to write mru_block outside the BQL. This
* is what happens:
*
* mru_block = xxx
@@ -1597,7 +1597,7 @@ int qemu_ram_get_fd(RAMBlock *rb)
return rb->fd;
}
-/* Called with iothread lock held. */
+/* Called with the BQL held. */
void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
{
RAMBlock *block;
@@ -1625,7 +1625,7 @@ void qemu_ram_set_idstr(RAMBlock *new_block, const char *name, DeviceState *dev)
}
}
-/* Called with iothread lock held. */
+/* Called with the BQL held. */
void qemu_ram_unset_idstr(RAMBlock *block)
{
/* FIXME: arch_init.c assumes that this is not called throughout
@@ -5824,7 +5824,7 @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
* Updates to VI and VF require us to update the status of
* virtual interrupts, which are the logical OR of these bits
* and the state of the input lines from the GIC. (This requires
- * that we have the iothread lock, which is done by marking the
+ * that we hold the BQL, which is done by marking the
* reginfo structs as ARM_CP_IO.)
* Note that if a write to HCR pends a VIRQ or VFIQ it is never
* possible for it to be taken immediately, because VIRQ and
@@ -217,7 +217,7 @@ static void channel_event(int event, SpiceChannelEventInfo *info)
* not do that. It isn't that easy to fix it in spice and even
* when it is fixed we still should cover the already released
* spice versions. So detect that we've been called from another
- * thread and grab the iothread lock if so before calling qemu
+ * thread and, if so, grab the BQL before calling qemu
* functions.
*/
bool need_lock = !qemu_thread_is_self(&me);
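A hedged, self-contained sketch of the workaround described above (helper name
invented; "me" stands in for the QemuThread this file records for the main
thread):

    static QemuThread me;   /* assumed to be set to the BQL-owning thread at init */

    /* Take the BQL only when the callback arrives on a foreign (SPICE server)
     * thread; on the QEMU main thread it is already held. */
    static void run_in_bql(void (*fn)(void *), void *opaque)
    {
        bool need_lock = !qemu_thread_is_self(&me);

        if (need_lock) {
            bql_lock();
        }
        fn(opaque);
        if (need_lock) {
            bql_unlock();
        }
    }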
@@ -409,7 +409,7 @@ static void rcu_init_complete(void)
qemu_event_init(&rcu_call_ready_event, false);
- /* The caller is assumed to have iothread lock, so the call_rcu thread
+ /* The caller is assumed to hold the BQL, so the call_rcu thread
* must have been quiescent even after forking, just recreate it.
*/
qemu_thread_create(&thread, "call_rcu", call_rcu_thread,
@@ -299,7 +299,7 @@ static ret_type glue(coreaudio_, name)args_decl \
#undef COREAUDIO_WRAPPER_FUNC
/*
- * callback to feed audiooutput buffer. called without iothread lock.
+ * callback to feed the audio output buffer. called without the BQL.
* allowed to lock "buf_mutex", but disallowed to have any other locks.
*/
static OSStatus audioDeviceIOProc(
@@ -538,7 +538,7 @@ static void update_device_playback_state(coreaudioVoiceOut *core)
}
}
-/* called without iothread lock. */
+/* called without the BQL. */
static OSStatus handle_voice_change(
AudioObjectID in_object_id,
UInt32 in_number_addresses,
@@ -113,7 +113,7 @@ static void cocoa_switch(DisplayChangeListener *dcl,
static QemuClipboardInfo *cbinfo;
static QemuEvent cbevent;
-// Utility functions to run specified code block with iothread lock held
+// Utility functions to run a specified code block with the BQL held
typedef void (^CodeBlock)(void);
typedef bool (^BoolCodeBlock)(void);
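A sketch of what such a utility can look like (the in-tree helper may differ;
this uses the CodeBlock typedef above and the blocks extension this file
already relies on):

    /* Run a block with the BQL held, unless the current thread already
     * holds it (e.g. a synchronous callback from QEMU code). */
    static void with_bql(CodeBlock block)
    {
        if (bql_locked()) {
            block();
        } else {
            bql_lock();
            block();
            bql_unlock();
        }
    }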
@@ -548,7 +548,7 @@ - (void) setContentDimensions
- (void) updateUIInfoLocked
{
- /* Must be called with the iothread lock, i.e. via updateUIInfo */
+ /* Must be called with the BQL held, i.e. via updateUIInfo */
NSSize frameSize;
QemuUIInfo info;
@@ -2075,7 +2075,7 @@ static void cocoa_display_init(DisplayState *ds, DisplayOptions *opts)
* Create the menu entries which depend on QEMU state (for consoles
* and removable devices). These make calls back into QEMU functions,
* which is OK because at this point we know that the second thread
- * holds the iothread lock and is synchronously waiting for us to
+ * holds the BQL and is synchronously waiting for us to
* finish.
*/
add_console_menu_entries();