diff --git a/block/block-copy.c b/block/block-copy.c
index 93eb1b2664..eddb0b81e0 100644
--- a/block/block-copy.c
+++ b/block/block-copy.c
@@ -584,7 +584,7 @@ static coroutine_fn int block_copy_task_entry(AioTask *task)
     BlockCopyState *s = t->s;
     bool error_is_read = false;
     BlockCopyMethod method = t->method;
-    int ret;
+    int ret = -1;
 
     WITH_GRAPH_RDLOCK_GUARD() {
         ret = block_copy_do_copy(s, t->req.offset, t->req.bytes, &method,
diff --git a/block/file-posix.c b/block/file-posix.c
index ff928b5e85..90fa54352c 100644
--- a/block/file-posix.c
+++ b/block/file-posix.c
@@ -1398,7 +1398,7 @@ static void raw_refresh_zoned_limits(BlockDriverState *bs, struct stat *st,
                                      Error **errp)
 {
     BDRVRawState *s = bs->opaque;
-    BlockZoneModel zoned;
+    BlockZoneModel zoned = BLK_Z_NONE;
     int ret;
 
     ret = get_sysfs_zoned_model(st, &zoned);
diff --git a/block/mirror.c b/block/mirror.c
index 61f0a717b7..2afe700b4d 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -349,7 +349,7 @@ static void coroutine_fn mirror_co_read(void *opaque)
     MirrorOp *op = opaque;
     MirrorBlockJob *s = op->s;
     int nb_chunks;
-    uint64_t ret;
+    int ret = -1;
     uint64_t max_bytes;
 
     max_bytes = s->granularity * s->max_iov;
@@ -565,7 +565,7 @@ static void coroutine_fn GRAPH_UNLOCKED mirror_iteration(MirrorBlockJob *s)
 
     bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
     while (nb_chunks > 0 && offset < s->bdev_length) {
-        int ret;
+        int ret = -1;
         int64_t io_bytes;
         int64_t io_bytes_acct;
         MirrorMethod mirror_method = MIRROR_METHOD_COPY;
@@ -841,7 +841,7 @@ static int coroutine_fn GRAPH_UNLOCKED mirror_dirty_init(MirrorBlockJob *s)
     int64_t offset;
     BlockDriverState *bs;
     BlockDriverState *target_bs = blk_bs(s->target);
-    int ret;
+    int ret = -1;
     int64_t count;
 
     bdrv_graph_co_rdlock();
@@ -931,7 +931,7 @@ static int coroutine_fn mirror_run(Job *job, Error **errp)
     MirrorBDSOpaque *mirror_top_opaque = s->mirror_top_bs->opaque;
     BlockDriverState *target_bs = blk_bs(s->target);
     bool need_drain = true;
-    BlockDeviceIoStatus iostatus;
+    BlockDeviceIoStatus iostatus = BLOCK_DEVICE_IO_STATUS__MAX;
     int64_t length;
     int64_t target_length;
     BlockDriverInfo bdi;
diff --git a/block/stream.c b/block/stream.c
index 7031eef12b..9076203193 100644
--- a/block/stream.c
+++ b/block/stream.c
@@ -155,8 +155,8 @@ static void stream_clean(Job *job)
 static int coroutine_fn stream_run(Job *job, Error **errp)
 {
     StreamBlockJob *s = container_of(job, StreamBlockJob, common.job);
-    BlockDriverState *unfiltered_bs;
-    int64_t len;
+    BlockDriverState *unfiltered_bs = NULL;
+    int64_t len = -1;
     int64_t offset = 0;
     int error = 0;
     int64_t n = 0; /* bytes */
@@ -177,7 +177,7 @@ static int coroutine_fn stream_run(Job *job, Error **errp)
 
     for ( ; offset < len; offset += n) {
         bool copy;
-        int ret;
+        int ret = -1;
 
         /* Note that even when no rate limit is applied we need to yield
          * with no pending I/O here so that bdrv_drain_all() returns.
diff --git a/fsdev/9p-iov-marshal.c b/fsdev/9p-iov-marshal.c
index a1c9beddd2..0c5a1a0fa2 100644
--- a/fsdev/9p-iov-marshal.c
+++ b/fsdev/9p-iov-marshal.c
@@ -84,9 +84,12 @@ ssize_t v9fs_iov_vunmarshal(struct iovec *out_sg, int out_num, size_t offset,
             break;
         }
         case 'w': {
-            uint16_t val, *valp;
+            uint16_t val = 0, *valp;
             valp = va_arg(ap, uint16_t *);
             copied = v9fs_unpack(&val, out_sg, out_num, offset, sizeof(val));
+            if (copied <= 0) {
+                break;
+            }
             if (bswap) {
                 *valp = le16_to_cpu(val);
             } else {
@@ -95,9 +98,12 @@ ssize_t v9fs_iov_vunmarshal(struct iovec *out_sg, int out_num, size_t offset,
             break;
         }
         case 'd': {
-            uint32_t val, *valp;
+            uint32_t val = 0, *valp;
             valp = va_arg(ap, uint32_t *);
             copied = v9fs_unpack(&val, out_sg, out_num, offset, sizeof(val));
+            if (copied <= 0) {
+                break;
+            }
             if (bswap) {
                 *valp = le32_to_cpu(val);
             } else {
@@ -106,9 +112,12 @@ ssize_t v9fs_iov_vunmarshal(struct iovec *out_sg, int out_num, size_t offset,
             break;
         }
         case 'q': {
-            uint64_t val, *valp;
+            uint64_t val = 0, *valp;
             valp = va_arg(ap, uint64_t *);
             copied = v9fs_unpack(&val, out_sg, out_num, offset, sizeof(val));
+            if (copied <= 0) {
+                break;
+            }
             if (bswap) {
                 *valp = le64_to_cpu(val);
             } else {
diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
index 115795392c..9166d7974d 100644
--- a/hw/block/virtio-blk.c
+++ b/hw/block/virtio-blk.c
@@ -1060,7 +1060,7 @@ static void virtio_blk_dma_restart_cb(void *opaque, bool running,
     VirtIOBlock *s = opaque;
     uint16_t num_queues = s->conf.num_queues;
     g_autofree VirtIOBlockReq **vq_rq = NULL;
-    VirtIOBlockReq *rq;
+    VirtIOBlockReq *rq = NULL;
 
     if (!running) {
         return;
diff --git a/hw/display/qxl.c b/hw/display/qxl.c
index 3c2b5182ca..0c4b1c9bf2 100644
--- a/hw/display/qxl.c
+++ b/hw/display/qxl.c
@@ -1301,8 +1301,8 @@ static int qxl_add_memslot(PCIQXLDevice *d, uint32_t slot_id, uint64_t delta,
     };
     uint64_t guest_start;
     uint64_t guest_end;
-    int pci_region;
-    pcibus_t pci_start;
+    int pci_region = -1;
+    pcibus_t pci_start = PCI_BAR_UNMAPPED;
     pcibus_t pci_end;
     MemoryRegion *mr;
     intptr_t virt_start;
diff --git a/hw/ide/ahci.c b/hw/ide/ahci.c
index 7fc2a08df2..0eb24304ee 100644
--- a/hw/ide/ahci.c
+++ b/hw/ide/ahci.c
@@ -948,7 +948,6 @@ static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist,
     uint64_t sum = 0;
     int off_idx = -1;
     int64_t off_pos = -1;
-    int tbl_entry_size;
     IDEBus *bus = &ad->port;
     BusState *qbus = BUS(bus);
 
@@ -976,6 +975,8 @@ static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist,
     /* Get entries in the PRDT, init a qemu sglist accordingly */
     if (prdtl > 0) {
         AHCI_SG *tbl = (AHCI_SG *)prdt;
+        int tbl_entry_size = 0;
+
         sum = 0;
         for (i = 0; i < prdtl; i++) {
             tbl_entry_size = prdt_tbl_entry_size(&tbl[i]);
diff --git a/hw/scsi/vhost-scsi.c b/hw/scsi/vhost-scsi.c
index 49cff2a0cb..22d16dc26b 100644
--- a/hw/scsi/vhost-scsi.c
+++ b/hw/scsi/vhost-scsi.c
@@ -172,7 +172,7 @@ static int vhost_scsi_set_workers(VHostSCSICommon *vsc, bool per_virtqueue)
     struct vhost_dev *dev = &vsc->dev;
     struct vhost_vring_worker vq_worker;
     struct vhost_worker_state worker;
-    int i, ret;
+    int i, ret = 0;
 
     /* Use default worker */
     if (!per_virtqueue || dev->nvqs == VHOST_SCSI_VQ_NUM_FIXED + 1) {
diff --git a/hw/sd/sdhci.c b/hw/sd/sdhci.c
index 87122e4245..ed01499391 100644
--- a/hw/sd/sdhci.c
+++ b/hw/sd/sdhci.c
@@ -747,7 +747,7 @@ static void sdhci_do_adma(SDHCIState *s)
     const uint16_t block_size = s->blksize & BLOCK_SIZE_MASK;
     const MemTxAttrs attrs = { .memory = true };
     ADMADescr dscr = {};
-    MemTxResult res;
+    MemTxResult res = MEMTX_ERROR;
     int i;
 
     if (s->trnmod & SDHC_TRNS_BLK_CNT_EN && !s->blkcnt) {
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index fc5f408f77..37aca8b431 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -414,6 +414,7 @@ static uint16_t vhost_svq_last_desc_of_chain(const VhostShadowVirtqueue *svq,
     return i;
 }
 
+G_GNUC_WARN_UNUSED_RESULT
 static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq,
                                            uint32_t *len)
 {
@@ -526,10 +527,11 @@ static void vhost_svq_flush(VhostShadowVirtqueue *svq,
 size_t vhost_svq_poll(VhostShadowVirtqueue *svq, size_t num)
 {
     size_t len = 0;
-    uint32_t r;
 
     while (num--) {
+        g_autofree VirtQueueElement *elem = NULL;
         int64_t start_us = g_get_monotonic_time();
+        uint32_t r = 0;
 
         do {
             if (vhost_svq_more_used(svq)) {
@@ -541,7 +543,7 @@ size_t vhost_svq_poll(VhostShadowVirtqueue *svq, size_t num)
             }
         } while (true);
 
-        vhost_svq_get_buf(svq, &r);
+        elem = vhost_svq_get_buf(svq, &r);
         len += r;
     }
 
diff --git a/linux-user/hppa/cpu_loop.c b/linux-user/hppa/cpu_loop.c
index bc093b8fe8..23b38ff9b2 100644
--- a/linux-user/hppa/cpu_loop.c
+++ b/linux-user/hppa/cpu_loop.c
@@ -99,6 +99,8 @@ static abi_ulong hppa_lws(CPUHPPAState *env)
 #endif
             }
             break;
+        default:
+            g_assert_not_reached();
         }
         break;
     }
diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
index 5478d58de3..233acb0855 100644
--- a/migration/dirtyrate.c
+++ b/migration/dirtyrate.c
@@ -149,12 +149,12 @@ int64_t vcpu_calculate_dirtyrate(int64_t calc_time_ms,
                                  unsigned int flag,
                                  bool one_shot)
 {
-    DirtyPageRecord *records;
+    DirtyPageRecord *records = NULL;
     int64_t init_time_ms;
     int64_t duration;
     int64_t dirtyrate;
     int i = 0;
-    unsigned int gen_id;
+    unsigned int gen_id = 0;
 
 retry:
     init_time_ms = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
diff --git a/migration/migration.c b/migration/migration.c
index ae2be31557..021faee2f3 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -2278,7 +2278,7 @@ static bool migrate_handle_rp_resume_ack(MigrationState *s,
  */
 static void migration_release_dst_files(MigrationState *ms)
 {
-    QEMUFile *file;
+    QEMUFile *file = NULL;
 
     WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) {
         /*
diff --git a/migration/ram.c b/migration/ram.c
index 81eda2736a..326ce7eb79 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1793,7 +1793,7 @@ static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
 {
     RAMBlock *block;
     ram_addr_t offset;
-    bool dirty;
+    bool dirty = false;
 
     do {
         block = unqueue_page(rs, &offset);
diff --git a/nbd/client-connection.c b/nbd/client-connection.c
index f9da67c87e..b11e266807 100644
--- a/nbd/client-connection.c
+++ b/nbd/client-connection.c
@@ -410,7 +410,7 @@ nbd_co_establish_connection(NBDClientConnection *conn, NBDExportInfo *info,
  */
 void nbd_co_establish_connection_cancel(NBDClientConnection *conn)
 {
-    Coroutine *wait_co;
+    Coroutine *wait_co = NULL;
 
     WITH_QEMU_LOCK_GUARD(&conn->mutex) {
         wait_co = g_steal_pointer(&conn->wait_co);
diff --git a/qom/object.c b/qom/object.c
index 28c5b66eab..d3d3003541 100644
--- a/qom/object.c
+++ b/qom/object.c
@@ -2226,7 +2226,7 @@ Object *object_resolve_path_at(Object *parent, const char *path)
 
 Object *object_resolve_type_unambiguous(const char *typename, Error **errp)
 {
-    bool ambig;
+    bool ambig = false;
     Object *o = object_resolve_path_type("", typename, &ambig);
 
     if (ambig) {
diff --git a/target/loongarch/gdbstub.c b/target/loongarch/gdbstub.c
index 7ca245ee81..3a03cf9cba 100644
--- a/target/loongarch/gdbstub.c
+++ b/target/loongarch/gdbstub.c
@@ -34,26 +34,28 @@ void write_fcc(CPULoongArchState *env, uint64_t val)
 int loongarch_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
 {
     CPULoongArchState *env = cpu_env(cs);
-    uint64_t val;
-
-    if (0 <= n && n < 32) {
-        val = env->gpr[n];
-    } else if (n == 32) {
-        /* orig_a0 */
-        val = 0;
-    } else if (n == 33) {
-        val = env->pc;
-    } else if (n == 34) {
-        val = env->CSR_BADV;
-    }
 
     if (0 <= n && n <= 34) {
+        uint64_t val;
+
+        if (n < 32) {
+            val = env->gpr[n];
+        } else if (n == 32) {
+            /* orig_a0 */
+            val = 0;
+        } else if (n == 33) {
+            val = env->pc;
+        } else /* if (n == 34) */ {
+            val = env->CSR_BADV;
+        }
+
         if (is_la64(env)) {
             return gdb_get_reg64(mem_buf, val);
         } else {
             return gdb_get_reg32(mem_buf, val);
         }
     }
+
     return 0;
 }
 
diff --git a/tests/unit/test-bdrv-drain.c b/tests/unit/test-bdrv-drain.c
index 666880472b..c112d5b189 100644
--- a/tests/unit/test-bdrv-drain.c
+++ b/tests/unit/test-bdrv-drain.c
@@ -722,7 +722,7 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type,
     BlockJob *job;
     TestBlockJob *tjob;
     IOThread *iothread = NULL;
-    int ret;
+    int ret = -1;
 
     src = bdrv_new_open_driver(&bdrv_test, "source", BDRV_O_RDWR,
                                &error_abort);
diff --git a/tests/unit/test-block-iothread.c b/tests/unit/test-block-iothread.c
index 3766d5de6b..20ed54f570 100644
--- a/tests/unit/test-block-iothread.c
+++ b/tests/unit/test-block-iothread.c
@@ -745,7 +745,7 @@ static void test_propagate_mirror(void)
     AioContext *main_ctx = qemu_get_aio_context();
     BlockDriverState *src, *target, *filter;
     BlockBackend *blk;
-    Job *job;
+    Job *job = NULL;
     Error *local_err = NULL;
 
     /* Create src and target*/
diff --git a/util/qemu-coroutine.c b/util/qemu-coroutine.c
index eb4eebefdf..64d6264fc7 100644
--- a/util/qemu-coroutine.c
+++ b/util/qemu-coroutine.c
@@ -136,7 +136,7 @@ static Coroutine *coroutine_pool_get_local(void)
 static void coroutine_pool_refill_local(void)
 {
     CoroutinePool *local_pool = get_ptr_local_pool();
-    CoroutinePoolBatch *batch;
+    CoroutinePoolBatch *batch = NULL;
 
     WITH_QEMU_LOCK_GUARD(&global_pool_lock) {
         batch = QSLIST_FIRST(&global_pool);
diff --git a/util/qemu-timer.c b/util/qemu-timer.c
index 42b74c0444..ffe9a3c5c1 100644
--- a/util/qemu-timer.c
+++ b/util/qemu-timer.c
@@ -182,7 +182,7 @@ bool qemu_clock_has_timers(QEMUClockType type)
 
 bool timerlist_expired(QEMUTimerList *timer_list)
 {
-    int64_t expire_time;
+    int64_t expire_time = 0;
     if (!qatomic_read(&timer_list->active_timers)) {
         return false;
     }
@@ -212,7 +212,7 @@ bool qemu_clock_expired(QEMUClockType type)
 int64_t timerlist_deadline_ns(QEMUTimerList *timer_list)
 {
     int64_t delta;
-    int64_t expire_time;
+    int64_t expire_time = 0;
     if (!qatomic_read(&timer_list->active_timers)) {
         return -1;
     }
@@ -451,7 +451,7 @@ void timer_mod_ns(QEMUTimer *ts, int64_t expire_time)
 void timer_mod_anticipate_ns(QEMUTimer *ts, int64_t expire_time)
 {
     QEMUTimerList *timer_list = ts->timer_list;
-    bool rearm;
+    bool rearm = false;
 
     WITH_QEMU_LOCK_GUARD(&timer_list->active_timers_lock) {
         if (ts->expire_time == -1 || ts->expire_time > expire_time) {
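
Most of the hunks above apply the same defensive pattern: a local that is only assigned inside a guarded or conditional path gets a safe initial value (-1, NULL, 0, or an enum sentinel) so that any later read stays well defined even when the assignment path is skipped. The following standalone sketch is not taken from the patch; the names are invented purely to show the failure mode being defended against and how the sentinel initializer removes it.

#include <stdbool.h>
#include <stdio.h>

static bool device_ready;          /* stands in for a runtime condition */

static int query_device(int *out)
{
    if (device_ready) {
        *out = 42;
        return 0;
    }
    return -1;                     /* error path: *out is left untouched */
}

static int report_value(void)
{
    int value = -1;                /* sentinel, mirroring the "= -1" initializers above */
    int ret;

    ret = query_device(&value);
    if (ret < 0) {
        /* value is read here even though query_device() never wrote it;
         * without the initializer this read would be undefined behaviour. */
        fprintf(stderr, "query failed, value=%d\n", value);
        return ret;
    }
    printf("value = %d\n", value);
    return 0;
}

int main(void)
{
    device_ready = false;          /* force the error path for the demonstration */
    return report_value() < 0 ? 1 : 0;
}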
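The vhost-shadow-virtqueue hunk pairs two further annotations: G_GNUC_WARN_UNUSED_RESULT on the callee, so the compiler flags callers that drop the returned element, and g_autofree on the captured pointer, so it is released when it leaves scope. A minimal glib sketch of the same pairing, assuming only stock glib and invented names, looks like this:

#include <glib.h>

G_GNUC_WARN_UNUSED_RESULT
static gchar *make_label(guint32 id)
{
    /* Caller owns the returned string. */
    return g_strdup_printf("elem-%u", id);
}

int main(void)
{
    /* Dropping the return value here would now trigger -Wunused-result. */
    g_autofree gchar *label = make_label(7);

    g_print("%s\n", label);
    return 0;                      /* label is g_free()d automatically at scope exit */
}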