Fix a bug on 32-bit builds

This commit is contained in:
blueloveTH 2024-12-14 13:09:35 +08:00
parent 76af7c8de2
commit 8999de5ad7
6 changed files with 37 additions and 21 deletions

17
build_g_32.sh Normal file
View File

@ -0,0 +1,17 @@
# Debug build targeting 32-bit x86: -O0, debug symbols, and sanitizers enabled.
# Abort on the first failing command.
set -e
# Regenerate any generated sources before compiling.
python prebuild.py
# Collect every C source file under src/.
SRC=$(find src/ -name "*.c")
FLAGS="-std=c11 -lm -ldl -I3rd/lz4 -Iinclude -O0 -Wfatal-errors -g -DDEBUG -DPK_ENABLE_OS=1 -DPK_BUILD_MODULE_LZ4=1"
SANITIZE_FLAGS="-fsanitize=address,leak,undefined"
# macOS has no standalone LeakSanitizer; drop "leak" there.
# POSIX test(1) uses "=" for string equality ("==" is a bash extension).
if [ "$(uname)" = "Darwin" ]; then
SANITIZE_FLAGS="-fsanitize=address,undefined"
fi
echo "Compiling C files..."
# -m32 forces a 32-bit build; $SRC is intentionally unquoted so the
# newline-separated file list word-splits into separate arguments.
gcc -m32 $FLAGS $SANITIZE_FLAGS $SRC src2/main.c 3rd/lz4/lz4libs/lz4.c -o main

View File

@ -2,59 +2,58 @@
#include <stdlib.h>
#include <string.h>
#include "pocketpy/common/utils.h"
void c11_vector__ctor(c11_vector* self, int elem_size){
void c11_vector__ctor(c11_vector* self, int elem_size) {
self->data = NULL;
self->length = 0;
self->capacity = 0;
self->elem_size = elem_size;
}
void c11_vector__dtor(c11_vector* self){
void c11_vector__dtor(c11_vector* self) {
if(self->data) free(self->data);
self->data = NULL;
self->length = 0;
self->capacity = 0;
}
c11_vector c11_vector__copy(const c11_vector* self){
c11_vector c11_vector__copy(const c11_vector* self) {
c11_vector retval;
c11_vector__ctor(&retval, self->elem_size);
c11_vector__reserve(&retval, self->capacity);
memcpy(retval.data, self->data, self->elem_size * self->length);
memcpy(retval.data, self->data, (size_t)self->elem_size * (size_t)self->length);
retval.length = self->length;
return retval;
}
void c11_vector__reserve(c11_vector* self, int capacity){
void c11_vector__reserve(c11_vector* self, int capacity) {
if(capacity < 4) capacity = 4;
if(capacity <= self->capacity) return;
// self->elem_size * capacity may overflow
self->data = realloc(self->data, (size_t)self->elem_size * (size_t)capacity);
if(self->data == NULL) c11__abort("c11_vector__reserve(): out of memory");
self->capacity = capacity;
}
void c11_vector__clear(c11_vector* self){
self->length = 0;
}
void c11_vector__clear(c11_vector* self) { self->length = 0; }
void* c11_vector__emplace(c11_vector* self){
if(self->length == self->capacity) c11_vector__reserve(self, self->capacity*2);
void* p = (char*)self->data + self->elem_size * self->length;
void* c11_vector__emplace(c11_vector* self) {
if(self->length == self->capacity) c11_vector__reserve(self, self->capacity * 2);
void* p = (char*)self->data + (size_t)self->elem_size * (size_t)self->length;
self->length++;
return p;
}
bool c11_vector__contains(const c11_vector *self, void *elem){
for(int i = 0; i < self->length; i++){
void* p = (char*)self->data + self->elem_size * i;
bool c11_vector__contains(const c11_vector* self, void* elem) {
for(int i = 0; i < self->length; i++) {
void* p = (char*)self->data + (size_t)self->elem_size * (size_t)i;
if(memcmp(p, elem, self->elem_size) == 0) return true;
}
return false;
}
void* c11_vector__submit(c11_vector* self, int* length){
void* c11_vector__submit(c11_vector* self, int* length) {
void* retval = self->data;
*length = self->length;
self->data = NULL;

View File

@ -14,7 +14,7 @@
timespec_get(&tms, TIME_UTC);
#endif
/* seconds, multiplied with 1 billion */
int64_t nanos = tms.tv_sec * NANOS_PER_SEC;
int64_t nanos = tms.tv_sec * (int64_t)NANOS_PER_SEC;
/* Add full nanoseconds */
nanos += tms.tv_nsec;
return nanos;

View File

@ -455,7 +455,7 @@ static bool builtins_ord(int argc, py_Ref argv) {
static bool builtins_id(int argc, py_Ref argv) {
PY_CHECK_ARGC(1);
if(argv->is_ptr) {
py_newint(py_retval(), (py_i64)argv->_obj);
py_newint(py_retval(), (intptr_t)argv->_obj);
} else {
py_newnone(py_retval());
}

View File

@ -16,7 +16,7 @@ bool pk__object_new(int argc, py_Ref argv) {
static bool object__hash__(int argc, py_Ref argv) {
PY_CHECK_ARGC(1);
assert(argv->is_ptr);
py_newint(py_retval(), (py_i64)argv->_obj);
py_newint(py_retval(), (intptr_t)argv->_obj);
return true;
}

View File

@ -26,6 +26,6 @@ for i in range(100):
ratio = test(gen_data())
# print(f'compression ratio: {ratio:.2f}')
# test 100MB of random data
rnd = [random.randint(0, 255) for _ in range(1024*1024*100)]
# test 64MB (1GB // 16) of random data
rnd = [random.randint(0, 255) for _ in range(1024*1024*1024//16)]
test(bytes(rnd))