Fixed 64 bit warnings.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@36928 a95241bf-73f2-0310-859d-f6bbb57e9c96
Ingo Weinhold 2010-05-24 21:41:20 +00:00
parent 9d570af7c8
commit 386be45abc
6 changed files with 26 additions and 23 deletions
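The pattern throughout this commit is the same: on a 64-bit target, off_t is a signed 64-bit type while size_t is unsigned 64-bit, so comparing or adding them triggers GCC's -Wsign-compare. Below is a minimal standalone sketch of the warning class and the cast-based fix the diff applies (hypothetical function, not from the commit):

#include <stdint.h>
#include <sys/types.h>

// Hypothetical example: off_t is signed, size_t is unsigned, and on a
// 64 bit target both are 64 bits wide, so neither converts cleanly to
// the other.
static bool
needs_grow(off_t pos, size_t length, off_t size)
{
	// return pos + length > size;
	//     -> warning: comparison between signed and unsigned integer
	//        expressions [-Wsign-compare]

	// The fix used throughout the commit: cast explicitly to an
	// unsigned 64 bit type. This is safe because the callers validate
	// pos >= 0 and size >= 0 beforehand.
	return (uint64_t)pos + (uint64_t)length > (uint64_t)size;
}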

View File

@@ -786,7 +786,7 @@ Inode::_AddSmallData(Transaction& transaction, NodeGetter& nodeGetter,
 	item->type = HOST_ENDIAN_TO_BFS_INT32(type);
-	if (oldDataSize < pos) {
+	if ((uint64)oldDataSize < (uint64)pos) {
 		// Fill gap with zeros
 		memset(item->Data() + oldDataSize, 0, pos - oldDataSize);
 	}
@@ -1199,7 +1199,7 @@ Inode::WriteAttribute(Transaction& transaction, const char* name, int32 type,
 		// Update index. Note, Index::Update() may be called even if
 		// initializing the index failed - it will just update the live
 		// queries in this case
-		if (pos < length || pos < oldLength) {
+		if (pos < length || (uint64)pos < (uint64)oldLength) {
 			index.Update(transaction, name, type, oldData, oldLength, buffer,
 				length, this);
 		}
@@ -1555,7 +1555,7 @@ Inode::WriteAt(Transaction& transaction, off_t pos, const uint8* buffer,
 	// TODO: support INODE_LOGGED!
 	size_t length = *_length;
-	bool changeSize = pos + length > Size();
+	bool changeSize = (uint64)pos + (uint64)length > (uint64)Size();
 	// set/check boundaries for pos/length
 	if (pos < 0)
@@ -1571,7 +1571,8 @@ Inode::WriteAt(Transaction& transaction, off_t pos, const uint8* buffer,
 	// Work around possible race condition: Someone might have shrunken the file
 	// while we had no lock.
-	if (!transaction.IsStarted() && pos + length > Size()) {
+	if (!transaction.IsStarted()
+		&& (uint64)pos + (uint64)length > (uint64)Size()) {
 		writeLocker.Unlock();
 		transaction.Start(fVolume, BlockNumber());
 		writeLocker.Lock();
@@ -1579,7 +1580,7 @@ Inode::WriteAt(Transaction& transaction, off_t pos, const uint8* buffer,
 	off_t oldSize = Size();
-	if (pos + length > oldSize) {
+	if ((uint64)pos + (uint64)length > (uint64)oldSize) {
 		// let's grow the data stream to the size needed
 		status_t status = SetFileSize(transaction, pos + length);
 		if (status != B_OK) {

View File

@@ -88,7 +88,7 @@ iterative_io_get_vecs_hook(void* cookie, io_request* request, off_t offset,
 {
 	Inode* inode = (Inode*)cookie;
-	return file_map_translate(inode->Map(), offset, size, vecs, _count,
+	return file_map_translate(inode->Map(), offset, size, vecs, _count,
 		inode->GetVolume()->BlockSize());
 }
@@ -410,7 +410,7 @@ bfs_read_pages(fs_volume* _volume, fs_vnode* _node, void* _cookie,
 	while (true) {
 		file_io_vec fileVecs[8];
-		uint32 fileVecCount = 8;
+		size_t fileVecCount = 8;
 		status = file_map_translate(inode->Map(), pos, bytesLeft, fileVecs,
 			&fileVecCount, 0);
@@ -455,7 +455,7 @@ bfs_write_pages(fs_volume* _volume, fs_vnode* _node, void* _cookie,
 	while (true) {
 		file_io_vec fileVecs[8];
-		uint32 fileVecCount = 8;
+		size_t fileVecCount = 8;
 		status = file_map_translate(inode->Map(), pos, bytesLeft, fileVecs,
 			&fileVecCount, 0);
@@ -529,9 +529,11 @@ bfs_get_file_map(fs_volume* _volume, fs_vnode* _node, off_t offset, size_t size,
 		vecs[index].length = (run.Length() << blockShift) - offset + fileOffset;
 		// are we already done?
-		if (size <= vecs[index].length
-			|| offset + vecs[index].length >= inode->Size()) {
-			if (offset + vecs[index].length > inode->Size()) {
+		if ((uint64)size <= (uint64)vecs[index].length
+			|| (uint64)offset + (uint64)vecs[index].length
+				>= (uint64)inode->Size()) {
+			if ((uint64)offset + (uint64)vecs[index].length
+				> (uint64)inode->Size()) {
 				// make sure the extent ends with the last official file
 				// block (without taking any preallocations into account)
 				vecs[index].length = round_up(inode->Size() - offset,
@@ -1479,7 +1481,7 @@ bfs_read_link(fs_volume* _volume, fs_vnode* _node, char* buffer,
 		RETURN_ERROR(B_BAD_VALUE);
 	if ((inode->Flags() & INODE_LONG_SYMLINK) != 0) {
-		if (inode->Size() < *_bufferSize)
+		if ((uint64)inode->Size() < (uint64)*_bufferSize)
 			*_bufferSize = inode->Size();
 		status_t status = inode->ReadAt(0, (uint8*)buffer, _bufferSize);

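The uint32 → size_t change for fileVecCount above is more than warning hygiene: file_map_translate() returns the vector count through a pointer. On the 32-bit targets uint32 and size_t happen to share an underlying type (presumably both unsigned long, following the BeOS typedefs), so taking &fileVecCount compiled; on a 64-bit target size_t widens and the pointer types no longer match. A standalone sketch of the issue, with an assumed stand-in for the real callee:

#include <stddef.h>

// Stand-in for a callee that, like file_map_translate(), reports a
// count through a size_t pointer (assumed shape, for illustration).
static void
translate_count(size_t* _count)
{
	*_count = 1;
}

int
main()
{
	// was: uint32 fileVecCount = 8;
	// If uint32 and size_t only coincide by accident on 32 bit builds,
	// &fileVecCount stops matching size_t* once the target is 64 bit -
	// hence the change to size_t.
	size_t fileVecCount = 8;
	translate_count(&fileVecCount);
	return (int)fileVecCount;
}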
View File

@@ -478,7 +478,7 @@ int get_attr_dir( const char *name, char **attr_buff, off_t *total_size )
         char buff[80];
         sprintf( buff, "read %ld, expected %ld",
-            (ssize_t)read_bytes, (ssize_t)fa_info.size );
+            (long)read_bytes, (long)fa_info.size );
         zipwarn( "attribute size mismatch: ", buff );
     }

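The zip hunk follows the standard printf portability rule: %ld consumes exactly a long, while ssize_t is only guaranteed to be some signed integer type, so the casts are changed to the type the format string actually names. A brief illustration (hypothetical wrapper, same idea as the hunk above):

#include <stdio.h>
#include <sys/types.h>

/* %ld requires exactly 'long'; ssize_t and off_t may be int, long, or
 * long long depending on the target, so cast to the format's own type.
 * Truncation on a 32 bit 'long' is acceptable here, since the values
 * only feed a warning message. */
static void
report_mismatch(ssize_t read_bytes, off_t expected)
{
	char buff[80];
	sprintf(buff, "read %ld, expected %ld",
		(long)read_bytes, (long)expected);
	fputs(buff, stderr);
}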
View File

@@ -164,7 +164,7 @@ cache_io(void *_cacheRef, void *cookie, fssh_off_t offset, fssh_addr_t buffer,
 	fssh_size_t size = *_size;
 	offset -= pageOffset;
-	if (offset + pageOffset + size > fileSize) {
+	if ((uint64_t)offset + pageOffset + size > (uint64_t)fileSize) {
 		// adapt size to be within the file's offsets
 		size = fileSize - pageOffset - offset;
 		*_size = size;

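Note that the new cache_io() condition casts only offset and fileSize, not pageOffset or size. That is enough: once one operand of + is uint64_t, the usual arithmetic conversions promote the remaining operands to uint64_t as well, so the whole left-hand side is computed unsigned. A compact check of that reasoning (standalone; the parameter types are assumptions mirroring the fssh_* typedefs):

#include <stdint.h>

// Once 'offset' is cast, both additions are performed in uint64_t;
// pageOffset and size are promoted by the usual arithmetic conversions.
static bool
past_end(int64_t offset, uint32_t pageOffset, uint64_t size,
	int64_t fileSize)
{
	return (uint64_t)offset + pageOffset + size > (uint64_t)fileSize;
}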
View File

@@ -363,14 +363,14 @@ FileMap::Translate(fssh_off_t offset, fssh_size_t size, fssh_file_io_vec* vecs,
 	fssh_size_t maxVecs = *_count;
 	fssh_size_t padLastVec = 0;
-	if (offset >= Size()) {
+	if ((uint64_t)offset >= (uint64_t)Size()) {
 		*_count = 0;
 		return FSSH_B_OK;
 	}
-	if (offset + size > fSize) {
+	if ((uint64_t)offset + size > (uint64_t)fSize) {
 		if (align > 1) {
 			fssh_off_t alignedSize = (fSize + align - 1) & ~(fssh_off_t)(align - 1);
-			if (offset + size >= alignedSize)
+			if ((uint64_t)offset + size >= (uint64_t)alignedSize)
 				padLastVec = alignedSize - fSize;
 		}
 		size = fSize - offset;
@@ -393,7 +393,7 @@ FileMap::Translate(fssh_off_t offset, fssh_size_t size, fssh_file_io_vec* vecs,
 	vecs[0].offset = fileExtent->disk.offset + offset;
 	vecs[0].length = fileExtent->disk.length - offset;
-	if (vecs[0].length >= size) {
+	if ((uint64_t)vecs[0].length >= (uint64_t)size) {
 		vecs[0].length = size + padLastVec;
 		*_count = 1;
 		return FSSH_B_OK;
@@ -409,7 +409,7 @@ FileMap::Translate(fssh_off_t offset, fssh_size_t size, fssh_file_io_vec* vecs,
 		vecs[vecIndex++] = fileExtent->disk;
-		if (size <= fileExtent->disk.length) {
+		if ((uint64_t)size <= (uint64_t)fileExtent->disk.length) {
 			vecs[vecIndex - 1].length = size + padLastVec;
 			break;
 		}

View File

@@ -1770,7 +1770,7 @@ common_file_io_vec_pages(int fd, const fssh_file_io_vec *fileVecs,
 		// a) also use this direct I/O for writes (otherwise, it would
 		//    overwrite precious data)
 		// b) panic if the term below is true (at least for writes)
-		if (size > fileVecs[0].length) {
+		if ((uint64_t)size > (uint64_t)fileVecs[0].length) {
 			//dprintf("warning: device driver %p doesn't respect total length in read_pages() call!\n", ref->device);
 			size = fileVecs[0].length;
 		}
@@ -1782,7 +1782,7 @@ common_file_io_vec_pages(int fd, const fssh_file_io_vec *fileVecs,
 			return FSSH_B_OK;
 		// if we reached the end of the file, we can return as well
-		if (size != fileVecs[0].length) {
+		if ((uint64_t)size != (uint64_t)fileVecs[0].length) {
 			*_numBytes = size;
 			return FSSH_B_OK;
 		}
@@ -1811,7 +1811,7 @@ common_file_io_vec_pages(int fd, const fssh_file_io_vec *fileVecs,
 	for (; fileVecIndex < fileVecCount; fileVecIndex++) {
 		const fssh_file_io_vec &fileVec = fileVecs[fileVecIndex];
 		fssh_off_t fileOffset = fileVec.offset;
-		fssh_off_t fileLeft = fssh_min_c(fileVec.length, bytesLeft);
+		fssh_off_t fileLeft = fssh_min_c((uint64_t)fileVec.length, (uint64_t)bytesLeft);
 		TRACE(("FILE VEC [%lu] length %Ld\n", fileVecIndex, fileLeft));
@@ -1825,7 +1825,7 @@ common_file_io_vec_pages(int fd, const fssh_file_io_vec *fileVecs,
 		size = 0;
 		// assign what is left of the current fileVec to the tempVecs
-		for (size = 0; size < fileLeft && vecIndex < vecCount
+		for (size = 0; (uint64_t)size < (uint64_t)fileLeft && vecIndex < vecCount
 			&& tempCount < MAX_TEMP_IO_VECS;) {
 			// try to satisfy one iovec per iteration (or as much as
 			// possible)
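
Finally, the fssh_min_c() change is the same cast pattern applied inside a macro argument. fssh_min_c is presumably a plain min() macro, so the comparison it expands to would mix the signedness of its two arguments; casting both at the call site keeps the expansion warning-free. A sketch under that assumption (the macro definition here is assumed, not the real fs_shell one):

#include <stdint.h>

// Assumed shape of the macro - the real definition lives in the
// fs_shell headers.
#define fssh_min_c(a, b) ((a) > (b) ? (b) : (a))

static int64_t
file_left(int64_t vecLength, uint64_t bytesLeft)
{
	// Mirrors the hunk above: cast both operands so the macro's
	// internal comparison sees a single unsigned 64 bit type. Both
	// values are known to be non-negative at this point.
	return (int64_t)fssh_min_c((uint64_t)vecLength, (uint64_t)bytesLeft);
}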