#include "internal/array.h"
#include "internal/bits.h"
#include "internal/error.h"
#include "internal/numeric.h"
#include "internal/string.h"
#include "internal/thread.h"

VALUE rb_eIOBufferLockedError;
VALUE rb_eIOBufferAllocationError;
VALUE rb_eIOBufferAccessError;
VALUE rb_eIOBufferInvalidatedError;
VALUE rb_eIOBufferMaskError;

size_t RUBY_IO_BUFFER_PAGE_SIZE;
size_t RUBY_IO_BUFFER_DEFAULT_SIZE;
/* From struct rb_io_buffer: per-buffer state flags. */
    enum rb_io_buffer_flags flags;
io_buffer_map_memory(size_t size, int flags)
{
#if defined(_WIN32)
    void * base = VirtualAlloc(0, size, MEM_COMMIT, PAGE_READWRITE);
#else
    int mmap_flags = MAP_ANONYMOUS;
    if (flags & RB_IO_BUFFER_SHARED) {
        mmap_flags |= MAP_SHARED;
    }
    else {
        mmap_flags |= MAP_PRIVATE;
    }

    void * base = mmap(NULL, size, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);

    if (base == MAP_FAILED) {
        rb_sys_fail("io_buffer_map_memory:mmap");
    }
#endif
io_buffer_map_file(struct rb_io_buffer *buffer, int descriptor, size_t size, rb_off_t offset, enum rb_io_buffer_flags flags)
{
#if defined(_WIN32)
    HANDLE file = (HANDLE)_get_osfhandle(descriptor);
    if (!file) rb_sys_fail("io_buffer_map_descriptor:_get_osfhandle");

    DWORD protect = PAGE_READONLY, access = FILE_MAP_READ;

    if (flags & RB_IO_BUFFER_READONLY) {
        buffer->flags |= RB_IO_BUFFER_READONLY;
    }
    else {
        protect = PAGE_READWRITE;
        access = FILE_MAP_WRITE;
    }

    HANDLE mapping = CreateFileMapping(file, NULL, protect, 0, 0, NULL);
    if (!mapping) rb_sys_fail("io_buffer_map_descriptor:CreateFileMapping");

    if (flags & RB_IO_BUFFER_PRIVATE) {
        access |= FILE_MAP_COPY;
        buffer->flags |= RB_IO_BUFFER_PRIVATE;
    }
    else {
        /* The mapping refers to (and is shared with) the underlying file. */
        buffer->flags |= RB_IO_BUFFER_EXTERNAL;
        buffer->flags |= RB_IO_BUFFER_SHARED;
    }

    void *base = MapViewOfFile(mapping, access, (DWORD)(offset >> 32), (DWORD)(offset & 0xFFFFFFFF), size);

    if (!base) {
        CloseHandle(mapping);
        rb_sys_fail("io_buffer_map_file:MapViewOfFile");
    }

    buffer->mapping = mapping;
#else
    int protect = PROT_READ, access = 0;

    if (flags & RB_IO_BUFFER_READONLY) {
        buffer->flags |= RB_IO_BUFFER_READONLY;
    }
    else {
        protect |= PROT_WRITE;
    }

    if (flags & RB_IO_BUFFER_PRIVATE) {
        buffer->flags |= RB_IO_BUFFER_PRIVATE;
        access |= MAP_PRIVATE;
    }
    else {
        /* The mapping refers to (and is shared with) the underlying file. */
        buffer->flags |= RB_IO_BUFFER_EXTERNAL;
        buffer->flags |= RB_IO_BUFFER_SHARED;
        access |= MAP_SHARED;
    }

    void *base = mmap(NULL, size, protect, access, descriptor, offset);

    if (base == MAP_FAILED) {
        rb_sys_fail("io_buffer_map_file:mmap");
    }
#endif

    buffer->base = base;
    buffer->size = size;

    buffer->flags |= RB_IO_BUFFER_MAPPED;
io_buffer_unmap(void* base, size_t size)
{
#if defined(_WIN32)
    VirtualFree(base, 0, MEM_RELEASE);
io_buffer_experimental(void)
{
    static int warned = 0;
    if (warned) return;
    warned = 1;

    rb_category_warn(RB_WARN_CATEGORY_EXPERIMENTAL,
        "IO::Buffer is experimental and both the Ruby and C interface may change in the future!");
/* io_buffer_zero: reset the buffer record to an empty state. */
    buffer->mapping = NULL;

    buffer->source = Qnil;
io_buffer_initialize(struct rb_io_buffer *buffer, void *base, size_t size, enum rb_io_buffer_flags flags, VALUE source)
{
    if (flags & RB_IO_BUFFER_INTERNAL) {
        base = calloc(size, 1);
    }
    else if (flags & RB_IO_BUFFER_MAPPED) {
        base = io_buffer_map_memory(size, flags);
    }

    if (!base) {
        rb_raise(rb_eIOBufferAllocationError, "Could not allocate buffer!");
    }

    buffer->flags = flags;
    buffer->source = source;
/* io_buffer_free: release any internal or mapped memory owned by the buffer. */
    if (buffer->flags & RB_IO_BUFFER_INTERNAL) {
        free(buffer->base);
    }

    if (buffer->flags & RB_IO_BUFFER_MAPPED) {
        io_buffer_unmap(buffer->base, buffer->size);
    }

#if defined(_WIN32)
    if (buffer->mapping) {
        CloseHandle(buffer->mapping);
        buffer->mapping = NULL;
    }
#endif

    buffer->source = Qnil;
rb_io_buffer_type_mark(void *_buffer)
    rb_gc_mark(buffer->source);

rb_io_buffer_type_free(void *_buffer)
    io_buffer_free(buffer);

rb_io_buffer_type_size(const void *_buffer)
    total += buffer->size;

static const rb_data_type_t rb_io_buffer_type = {
    .wrap_struct_name = "IO::Buffer",
    .function = {
        .dmark = rb_io_buffer_type_mark,
        .dfree = rb_io_buffer_type_free,
        .dsize = rb_io_buffer_type_size,
    },
    .flags = RUBY_TYPED_FREE_IMMEDIATELY,
};
io_buffer_extract_offset(VALUE argument)
    if (rb_int_negative_p(argument)) {

io_buffer_extract_length(VALUE argument)
    if (rb_int_negative_p(argument)) {

io_buffer_extract_size(VALUE argument)
    if (rb_int_negative_p(argument)) {

io_buffer_default_length(const struct rb_io_buffer *buffer, size_t offset)
    if (offset > buffer->size) {

    return buffer->size - offset;
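/*
 * Illustrative sketch (not part of the original source): when no explicit
 * length is given, the remaining span of the buffer after `offset` is used,
 * and an offset past the end is rejected. For example, a 4096-byte buffer
 * with offset 4000 yields a default length of 96. A hypothetical stand-alone
 * model of the same rule:
 */
static size_t
example_default_length(size_t buffer_size, size_t offset)
{
    /* Remaining bytes after the offset, or 0 when the offset is out of range. */
    return offset <= buffer_size ? buffer_size - offset : 0;
}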
io_buffer_extract_length_offset(VALUE self, int argc, VALUE argv[], size_t *length, size_t *offset)
    *offset = io_buffer_extract_offset(argv[1]);

    if (argc >= 1 && !NIL_P(argv[0])) {
        *length = io_buffer_extract_length(argv[0]);
    }
    else {
        *length = io_buffer_default_length(buffer, *offset);
    }

io_buffer_extract_offset_length(VALUE self, int argc, VALUE argv[], size_t *offset, size_t *length)
    *offset = io_buffer_extract_offset(argv[0]);

    if (argc >= 2 && !NIL_P(argv[1])) {
        *length = io_buffer_extract_length(argv[1]);
    }
    else {
        *length = io_buffer_default_length(buffer, *offset);
    }
rb_io_buffer_type_allocate(VALUE self)
    io_buffer_zero(buffer);

static VALUE io_buffer_for_make_instance(VALUE klass, VALUE string, enum rb_io_buffer_flags flags)
    VALUE instance = rb_io_buffer_type_allocate(klass);

    flags |= RB_IO_BUFFER_EXTERNAL;

    flags |= RB_IO_BUFFER_READONLY;

    if (!(flags & RB_IO_BUFFER_READONLY))

    enum rb_io_buffer_flags flags;

io_buffer_for_yield_instance(VALUE _arguments)
    arguments->instance = io_buffer_for_make_instance(arguments->klass, arguments->string, arguments->flags);

    return rb_yield(arguments->instance);

io_buffer_for_yield_instance_ensure(VALUE _arguments)
    if (arguments->instance != Qnil) {
        rb_io_buffer_free(arguments->instance);
    }
rb_io_buffer_type_for(VALUE klass, VALUE string)
    /* With a block: yield a temporary wrapping buffer and free it afterwards.
     * Without a block: return a read-only buffer over the acquired frozen string. */
    return rb_ensure(io_buffer_for_yield_instance, (VALUE)&arguments, io_buffer_for_yield_instance_ensure, (VALUE)&arguments);

    string = rb_str_tmp_frozen_acquire(string);
    return io_buffer_for_make_instance(klass, string, RB_IO_BUFFER_READONLY);
rb_io_buffer_new(void *base, size_t size, enum rb_io_buffer_flags flags)
    VALUE instance = rb_io_buffer_type_allocate(rb_cIOBuffer);

    io_buffer_initialize(buffer, base, size, flags, Qnil);

rb_io_buffer_map(VALUE io, size_t size, rb_off_t offset, enum rb_io_buffer_flags flags)
    io_buffer_experimental();

    VALUE instance = rb_io_buffer_type_allocate(rb_cIOBuffer);

    io_buffer_map_file(buffer, descriptor, size, offset, flags);
io_buffer_map(int argc, VALUE *argv, VALUE klass)
    if (argc >= 2 && !RB_NIL_P(argv[1])) {
        size = io_buffer_extract_size(argv[1]);
    }
    else {
        rb_off_t file_size = rb_file_size(io);

        /* Negative sizes and files larger than SIZE_MAX are rejected: */
        else if ((uintmax_t)file_size > SIZE_MAX) {

        size = (size_t)file_size;
    }

    enum rb_io_buffer_flags flags = 0;

    return rb_io_buffer_map(io, size, offset, flags);
static inline enum rb_io_buffer_flags
io_flags_for_size(size_t size)
    if (size >= RUBY_IO_BUFFER_PAGE_SIZE) {
        return RB_IO_BUFFER_MAPPED;
    }

    return RB_IO_BUFFER_INTERNAL;
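/*
 * Illustrative sketch (not part of the original source): io_flags_for_size
 * chooses anonymous memory mapping for requests of at least one page and a
 * plain heap allocation for anything smaller. A stand-alone model of the same
 * decision, assuming a hypothetical 4096-byte page size:
 */
enum example_allocation_strategy {EXAMPLE_HEAP, EXAMPLE_MAPPED};

static enum example_allocation_strategy
example_strategy_for_size(size_t size)
{
    const size_t example_page_size = 4096; /* assumed for illustration only */
    return size >= example_page_size ? EXAMPLE_MAPPED : EXAMPLE_HEAP;
}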
rb_io_buffer_initialize(int argc, VALUE *argv, VALUE self)
    io_buffer_experimental();

    size_t size;
    if (argc > 0) {
        size = io_buffer_extract_size(argv[0]);
    }
    else {
        size = RUBY_IO_BUFFER_DEFAULT_SIZE;
    }

    enum rb_io_buffer_flags flags = 0;
    flags |= io_flags_for_size(size);

    io_buffer_initialize(buffer, NULL, size, flags, Qnil);
io_buffer_validate_slice(VALUE source, void *base, size_t size)
    void *source_base = NULL;
    size_t source_size = 0;

    rb_io_buffer_get_bytes(source, &source_base, &source_size);

    /* The source is invalid: */
    if (source_base == NULL) return 0;

    /* The slice starts before the source does: */
    if (base < source_base) return 0;

    const void *source_end = (char*)source_base + source_size;
    const void *end = (char*)base + size;

    /* The slice ends past the end of the source: */
    if (end > source_end) return 0;

/* io_buffer_validate: only slices need the range check above. */
    if (buffer->source != Qnil) {
        return io_buffer_validate_slice(buffer->source, buffer->base, buffer->size);
    }
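/*
 * Illustrative sketch (not part of the original source): a slice is valid
 * when its byte range [base, base + size) is fully contained in the source
 * range [source_base, source_base + source_size), which is exactly what the
 * early returns above check. A hypothetical stand-alone containment test:
 */
static int
example_range_contains(const char *outer, size_t outer_size,
                       const char *inner, size_t inner_size)
{
    /* 1 if [inner, inner + inner_size) fits entirely inside outer. */
    return inner >= outer && inner + inner_size <= outer + outer_size;
}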
rb_io_buffer_to_s(VALUE self)
    rb_str_catf(result, " %p+%"PRIdSIZE, buffer->base, buffer->size);

    /* Each of the following conditionals appends the matching status word
     * (for example EXTERNAL, INTERNAL, MAPPED, SHARED, LOCKED, READONLY)
     * to the summary string: */
    if (buffer->base == NULL) {
    if (buffer->flags & RB_IO_BUFFER_EXTERNAL) {
    if (buffer->flags & RB_IO_BUFFER_INTERNAL) {
    if (buffer->flags & RB_IO_BUFFER_MAPPED) {
    if (buffer->flags & RB_IO_BUFFER_SHARED) {
    if (buffer->flags & RB_IO_BUFFER_LOCKED) {
    if (buffer->flags & RB_IO_BUFFER_READONLY) {
    if (buffer->source != Qnil) {
    if (!io_buffer_validate(buffer)) {
io_buffer_hexdump(VALUE string, size_t width, char *base, size_t size, int first)
    char *text = alloca(width+1);

    for (size_t offset = 0; offset < size; offset += width) {
        memset(text, '\0', width);

        rb_str_catf(string, "\n0x%08" PRIxSIZE " ", offset);

        for (size_t i = 0; i < width; i += 1) {
            if (offset+i < size) {
                unsigned char value = ((unsigned char*)base)[offset+i];

                if (value < 127 && isprint(value)) {
                    text[i] = (char)value;
                }
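/*
 * Illustrative note (not part of the original source): each outer iteration
 * above emits one hexdump row, roughly of the form
 *
 *   0x00000010 <up to `width` bytes as hex> <the same bytes as text>
 *
 * where only printable bytes are echoed into the text column.
 */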
rb_io_buffer_hexdump(VALUE self)
    if (io_buffer_validate(buffer) && buffer->base) {
        result = rb_str_buf_new(buffer->size*3 + (buffer->size/16)*12 + 1);

        io_buffer_hexdump(result, 16, buffer->base, buffer->size, 1);
    }

rb_io_buffer_inspect(VALUE self)
    VALUE result = rb_io_buffer_to_s(self);

    if (io_buffer_validate(buffer)) {
        if (buffer->size <= 256) {
            io_buffer_hexdump(result, 16, buffer->base, buffer->size, 0);
        }
    }
rb_io_buffer_size(VALUE self)

rb_io_buffer_valid_p(VALUE self)
    return RBOOL(io_buffer_validate(buffer));

rb_io_buffer_null_p(VALUE self)
    return RBOOL(buffer->base == NULL);

rb_io_buffer_empty_p(VALUE self)
    return RBOOL(buffer->size == 0);

rb_io_buffer_external_p(VALUE self)
    return RBOOL(buffer->flags & RB_IO_BUFFER_EXTERNAL);

rb_io_buffer_internal_p(VALUE self)
    return RBOOL(buffer->flags & RB_IO_BUFFER_INTERNAL);

rb_io_buffer_mapped_p(VALUE self)
    return RBOOL(buffer->flags & RB_IO_BUFFER_MAPPED);

rb_io_buffer_shared_p(VALUE self)
    return RBOOL(buffer->flags & RB_IO_BUFFER_SHARED);

rb_io_buffer_locked_p(VALUE self)
    return RBOOL(buffer->flags & RB_IO_BUFFER_LOCKED);

rb_io_buffer_readonly_p(VALUE self)
    return buffer->flags & RB_IO_BUFFER_READONLY;

io_buffer_readonly_p(VALUE self)
    return RBOOL(rb_io_buffer_readonly_p(self));
/* io_buffer_lock: */
    if (buffer->flags & RB_IO_BUFFER_LOCKED) {
        rb_raise(rb_eIOBufferLockedError, "Buffer already locked!");
    }

    buffer->flags |= RB_IO_BUFFER_LOCKED;

rb_io_buffer_lock(VALUE self)
    io_buffer_lock(buffer);

/* io_buffer_unlock: */
    if (!(buffer->flags & RB_IO_BUFFER_LOCKED)) {
        rb_raise(rb_eIOBufferLockedError, "Buffer not locked!");
    }

    buffer->flags &= ~RB_IO_BUFFER_LOCKED;

rb_io_buffer_unlock(VALUE self)
    io_buffer_unlock(buffer);

rb_io_buffer_try_unlock(VALUE self)
    if (buffer->flags & RB_IO_BUFFER_LOCKED) {
        buffer->flags &= ~RB_IO_BUFFER_LOCKED;
    }

rb_io_buffer_locked(VALUE self)
    if (buffer->flags & RB_IO_BUFFER_LOCKED) {
        rb_raise(rb_eIOBufferLockedError, "Buffer already locked!");
    }

    buffer->flags |= RB_IO_BUFFER_LOCKED;
    /* (the given block runs while the buffer is locked) */
    buffer->flags &= ~RB_IO_BUFFER_LOCKED;

rb_io_buffer_free(VALUE self)
    if (buffer->flags & RB_IO_BUFFER_LOCKED) {
        rb_raise(rb_eIOBufferLockedError, "Buffer is locked!");
    }

    io_buffer_free(buffer);
io_buffer_validate_range(struct rb_io_buffer *buffer, size_t offset, size_t length)
    if (offset + length > buffer->size) {

rb_io_buffer_slice(struct rb_io_buffer *buffer, VALUE self, size_t offset, size_t length)
    io_buffer_validate_range(buffer, offset, length);

    slice->base = (char*)buffer->base + offset;
    slice->size = length;

    /* The slice's source should be the root buffer: */
    if (buffer->source != Qnil)
        slice->source = buffer->source;
    else
        slice->source = self;

io_buffer_slice(int argc, VALUE *argv, VALUE self)
    size_t offset, length;
    struct rb_io_buffer *buffer = io_buffer_extract_offset_length(self, argc, argv, &offset, &length);

    return rb_io_buffer_slice(buffer, self, offset, length);
rb_io_buffer_get_bytes(VALUE self, void **base, size_t *size)
    if (io_buffer_validate(buffer)) {
        *base = buffer->base;
        *size = buffer->size;

        return buffer->flags;
    }

io_buffer_get_bytes_for_writing(struct rb_io_buffer *buffer, void **base, size_t *size)
    if (buffer->flags & RB_IO_BUFFER_READONLY) {
        rb_raise(rb_eIOBufferAccessError, "Buffer is not writable!");
    }

    if (!io_buffer_validate(buffer)) {
        rb_raise(rb_eIOBufferInvalidatedError, "Buffer is invalid!");
    }

    if (buffer->base) {
        *base = buffer->base;
        *size = buffer->size;
    }
    else {
        rb_raise(rb_eIOBufferAllocationError, "The buffer is not allocated!");
    }

rb_io_buffer_get_bytes_for_writing(VALUE self, void **base, size_t *size)
    io_buffer_get_bytes_for_writing(buffer, base, size);
io_buffer_get_bytes_for_reading(struct rb_io_buffer *buffer, const void **base, size_t *size)
    if (!io_buffer_validate(buffer)) {
        rb_raise(rb_eIOBufferInvalidatedError, "Buffer has been invalidated!");
    }

    if (buffer->base) {
        *base = buffer->base;
        *size = buffer->size;
    }
    else {
        rb_raise(rb_eIOBufferAllocationError, "The buffer is not allocated!");
    }

rb_io_buffer_get_bytes_for_reading(VALUE self, const void **base, size_t *size)
    io_buffer_get_bytes_for_reading(buffer, base, size);
rb_io_buffer_transfer(VALUE self)
    if (buffer->flags & RB_IO_BUFFER_LOCKED) {
        rb_raise(rb_eIOBufferLockedError, "Cannot transfer ownership of locked buffer!");
    }

    *transferred = *buffer;
    io_buffer_zero(buffer);

io_buffer_resize_clear(struct rb_io_buffer *buffer, void* base, size_t size)
    if (size > buffer->size) {
        memset((unsigned char*)base+buffer->size, 0, size - buffer->size);
    }

io_buffer_resize_copy(struct rb_io_buffer *buffer, size_t size)
    io_buffer_initialize(&resized, NULL, size, io_flags_for_size(size), Qnil);

    size_t preserve = buffer->size;
    if (preserve > size) preserve = size;
    memcpy(resized.base, buffer->base, preserve);

    io_buffer_resize_clear(buffer, resized.base, size);

    io_buffer_free(buffer);
rb_io_buffer_resize(VALUE self, size_t size)
    if (buffer->flags & RB_IO_BUFFER_LOCKED) {
        rb_raise(rb_eIOBufferLockedError, "Cannot resize locked buffer!");
    }

    if (buffer->base == NULL) {
        io_buffer_initialize(buffer, NULL, size, io_flags_for_size(size), Qnil);
        return;
    }

    if (buffer->flags & RB_IO_BUFFER_EXTERNAL) {
        rb_raise(rb_eIOBufferAccessError, "Cannot resize external buffer!");
    }

#if defined(HAVE_MREMAP) && defined(MREMAP_MAYMOVE)
    if (buffer->flags & RB_IO_BUFFER_MAPPED) {
        void *base = mremap(buffer->base, buffer->size, size, MREMAP_MAYMOVE);

        if (base == MAP_FAILED) {
            rb_sys_fail("rb_io_buffer_resize:mremap");
        }

        io_buffer_resize_clear(buffer, base, size);

        buffer->base = base;
        buffer->size = size;
        return;
    }
#endif

    if (buffer->flags & RB_IO_BUFFER_INTERNAL) {
        if (size == 0) {
            io_buffer_free(buffer);
            return;
        }

        void *base = realloc(buffer->base, size);

        io_buffer_resize_clear(buffer, base, size);

        buffer->base = base;
        buffer->size = size;
        return;
    }

    io_buffer_resize_copy(buffer, size);

/* IO::Buffer#resize: */
    rb_io_buffer_resize(self, io_buffer_extract_size(size));
/* IO::Buffer#<=>: a shorter buffer compares lower; equal sizes fall through to memcmp. */
    const void *ptr1, *ptr2;
    size_t size1, size2;

    rb_io_buffer_get_bytes_for_reading(self, &ptr1, &size1);
    rb_io_buffer_get_bytes_for_reading(other, &ptr2, &size2);

    if (size1 < size2) {
    if (size1 > size2) {

    return RB_INT2NUM(memcmp(ptr1, ptr2, size1));
io_buffer_validate_type(size_t size, size_t offset)
    if (offset > size) {
        rb_raise(rb_eArgError, "Type extends beyond end of buffer! (offset=%"PRIdSIZE" > size=%"PRIdSIZE")", offset, size);
    }

#define ruby_swap8(value) value

ruby_swapf32(float value)
    union swapf32 swap = {.value = value};
    swap.integral = ruby_swap32(swap.integral);
    return swap.value;

ruby_swapf64(double value)
    union swapf64 swap = {.value = value};
    swap.integral = ruby_swap64(swap.integral);
    return swap.value;
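/*
 * Illustrative sketch (not part of the original source): the unions above
 * reinterpret a float's bits as a same-width integer so the integer
 * byte-swap helpers can be reused. An equivalent stand-alone version using
 * memcpy-based type punning (assumes <stdint.h> and <string.h>):
 */
static uint32_t
example_bswap32(uint32_t value)
{
    return ((value & 0x000000FFu) << 24) |
           ((value & 0x0000FF00u) <<  8) |
           ((value & 0x00FF0000u) >>  8) |
           ((value & 0xFF000000u) >> 24);
}

static float
example_swapf32(float value)
{
    uint32_t bits;
    memcpy(&bits, &value, sizeof(bits));  /* float -> raw bits */
    bits = example_bswap32(bits);         /* reverse the byte order */
    memcpy(&value, &bits, sizeof(bits));  /* raw bits -> float */
    return value;
}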
#define IO_BUFFER_DECLARE_TYPE(name, type, endian, wrap, unwrap, swap) \
static ID RB_IO_BUFFER_DATA_TYPE_##name; \
\
static VALUE \
io_buffer_read_##name(const void* base, size_t size, size_t *offset) \
{ \
    io_buffer_validate_type(size, *offset + sizeof(type)); \
    type value; \
    memcpy(&value, (char*)base + *offset, sizeof(type)); \
    if (endian != RB_IO_BUFFER_HOST_ENDIAN) value = swap(value); \
    *offset += sizeof(type); \
    return wrap(value); \
} \
\
static void \
io_buffer_write_##name(const void* base, size_t size, size_t *offset, VALUE _value) \
{ \
    io_buffer_validate_type(size, *offset + sizeof(type)); \
    type value = unwrap(_value); \
    if (endian != RB_IO_BUFFER_HOST_ENDIAN) value = swap(value); \
    memcpy((char*)base + *offset, &value, sizeof(type)); \
    *offset += sizeof(type); \
} \
\
enum { \
    RB_IO_BUFFER_DATA_TYPE_##name##_SIZE = sizeof(type) \
};
IO_BUFFER_DECLARE_TYPE(s16, int16_t, RB_IO_BUFFER_LITTLE_ENDIAN, RB_INT2NUM, RB_NUM2INT, ruby_swap16)
IO_BUFFER_DECLARE_TYPE(S16, int16_t, RB_IO_BUFFER_BIG_ENDIAN, RB_INT2NUM, RB_NUM2INT, ruby_swap16)

IO_BUFFER_DECLARE_TYPE(s32, int32_t, RB_IO_BUFFER_LITTLE_ENDIAN, RB_INT2NUM, RB_NUM2INT, ruby_swap32)
IO_BUFFER_DECLARE_TYPE(S32, int32_t, RB_IO_BUFFER_BIG_ENDIAN, RB_INT2NUM, RB_NUM2INT, ruby_swap32)

IO_BUFFER_DECLARE_TYPE(u64, uint64_t, RB_IO_BUFFER_LITTLE_ENDIAN, RB_ULL2NUM, RB_NUM2ULL, ruby_swap64)
IO_BUFFER_DECLARE_TYPE(U64, uint64_t, RB_IO_BUFFER_BIG_ENDIAN, RB_ULL2NUM, RB_NUM2ULL, ruby_swap64)
IO_BUFFER_DECLARE_TYPE(s64, int64_t, RB_IO_BUFFER_LITTLE_ENDIAN, RB_LL2NUM, RB_NUM2LL, ruby_swap64)
IO_BUFFER_DECLARE_TYPE(S64, int64_t, RB_IO_BUFFER_BIG_ENDIAN, RB_LL2NUM, RB_NUM2LL, ruby_swap64)

IO_BUFFER_DECLARE_TYPE(f32, float, RB_IO_BUFFER_LITTLE_ENDIAN, DBL2NUM, NUM2DBL, ruby_swapf32)
IO_BUFFER_DECLARE_TYPE(F32, float, RB_IO_BUFFER_BIG_ENDIAN, DBL2NUM, NUM2DBL, ruby_swapf32)
IO_BUFFER_DECLARE_TYPE(f64, double, RB_IO_BUFFER_LITTLE_ENDIAN, DBL2NUM, NUM2DBL, ruby_swapf64)
IO_BUFFER_DECLARE_TYPE(F64, double, RB_IO_BUFFER_BIG_ENDIAN, DBL2NUM, NUM2DBL, ruby_swapf64)
#undef IO_BUFFER_DECLARE_TYPE
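/*
 * Illustrative sketch (not part of the original source): each expansion of
 * IO_BUFFER_DECLARE_TYPE produces a bounds-checked, endian-aware reader and
 * writer. A hypothetical stand-alone model of the read side for a
 * little-endian unsigned 16-bit value (assumes <stdint.h> and <string.h>;
 * the real accessors raise on out-of-range offsets instead of skipping):
 */
static uint16_t
example_read_u16_le(const unsigned char *base, size_t size, size_t *offset)
{
    uint16_t value = 0;
    if (*offset + sizeof(value) <= size) {
        memcpy(&value, base + *offset, sizeof(value));
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        value = (uint16_t)((value << 8) | (value >> 8));  /* swap on big-endian hosts */
#endif
        *offset += sizeof(value);
    }
    return value;
}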
io_buffer_buffer_type_size(ID buffer_type)
#define IO_BUFFER_DATA_TYPE_SIZE(name) if (buffer_type == RB_IO_BUFFER_DATA_TYPE_##name) return RB_IO_BUFFER_DATA_TYPE_##name##_SIZE;
    IO_BUFFER_DATA_TYPE_SIZE(U8)
    IO_BUFFER_DATA_TYPE_SIZE(S8)
    IO_BUFFER_DATA_TYPE_SIZE(u16)
    IO_BUFFER_DATA_TYPE_SIZE(U16)
    IO_BUFFER_DATA_TYPE_SIZE(s16)
    IO_BUFFER_DATA_TYPE_SIZE(S16)
    IO_BUFFER_DATA_TYPE_SIZE(u32)
    IO_BUFFER_DATA_TYPE_SIZE(U32)
    IO_BUFFER_DATA_TYPE_SIZE(s32)
    IO_BUFFER_DATA_TYPE_SIZE(S32)
    IO_BUFFER_DATA_TYPE_SIZE(u64)
    IO_BUFFER_DATA_TYPE_SIZE(U64)
    IO_BUFFER_DATA_TYPE_SIZE(s64)
    IO_BUFFER_DATA_TYPE_SIZE(S64)
    IO_BUFFER_DATA_TYPE_SIZE(f32)
    IO_BUFFER_DATA_TYPE_SIZE(F32)
    IO_BUFFER_DATA_TYPE_SIZE(f64)
    IO_BUFFER_DATA_TYPE_SIZE(F64)
#undef IO_BUFFER_DATA_TYPE_SIZE
io_buffer_size_of(VALUE klass, VALUE buffer_type)
    for (long i = 0; i < RARRAY_LEN(buffer_type); i++) {
rb_io_buffer_get_value(const void* base, size_t size, ID buffer_type, size_t *offset)
#define IO_BUFFER_GET_VALUE(name) if (buffer_type == RB_IO_BUFFER_DATA_TYPE_##name) return io_buffer_read_##name(base, size, offset);
    IO_BUFFER_GET_VALUE(U8)
    IO_BUFFER_GET_VALUE(S8)

    IO_BUFFER_GET_VALUE(u16)
    IO_BUFFER_GET_VALUE(U16)
    IO_BUFFER_GET_VALUE(s16)
    IO_BUFFER_GET_VALUE(S16)

    IO_BUFFER_GET_VALUE(u32)
    IO_BUFFER_GET_VALUE(U32)
    IO_BUFFER_GET_VALUE(s32)
    IO_BUFFER_GET_VALUE(S32)

    IO_BUFFER_GET_VALUE(u64)
    IO_BUFFER_GET_VALUE(U64)
    IO_BUFFER_GET_VALUE(s64)
    IO_BUFFER_GET_VALUE(S64)

    IO_BUFFER_GET_VALUE(f32)
    IO_BUFFER_GET_VALUE(F32)
    IO_BUFFER_GET_VALUE(f64)
    IO_BUFFER_GET_VALUE(F64)
#undef IO_BUFFER_GET_VALUE
/* IO::Buffer#get_value: */
    size_t offset = io_buffer_extract_offset(_offset);

    rb_io_buffer_get_bytes_for_reading(self, &base, &size);

    return rb_io_buffer_get_value(base, size, RB_SYM2ID(type), &offset);

/* IO::Buffer#get_values: */
    size_t offset = io_buffer_extract_offset(_offset);

    rb_io_buffer_get_bytes_for_reading(self, &base, &size);

    for (long i = 0; i < RARRAY_LEN(buffer_types); i++) {
        VALUE type = rb_ary_entry(buffer_types, i);

        rb_ary_push(array, value);
    }
io_buffer_extract_count(VALUE argument)
    if (rb_int_negative_p(argument)) {

io_buffer_extract_offset_count(ID buffer_type, size_t size, int argc, VALUE *argv, size_t *offset, size_t *count)
    *offset = io_buffer_extract_offset(argv[0]);

    *count = io_buffer_extract_count(argv[1]);

    if (*offset > size) {

    *count = (size - *offset) / io_buffer_buffer_type_size(buffer_type);
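/*
 * Illustrative arithmetic (not part of the original source): when no count
 * argument is supplied, the element count defaults to the remaining bytes
 * divided by the element width. For a 16-byte buffer enumerated as u32
 * starting at offset 4, that is (16 - 4) / 4 = 3 elements.
 */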
io_buffer_each(int argc, VALUE *argv, VALUE self)
    rb_io_buffer_get_bytes_for_reading(self, &base, &size);

    buffer_type = RB_IO_BUFFER_DATA_TYPE_U8;

    size_t offset, count;
    io_buffer_extract_offset_count(buffer_type, size, argc-1, argv+1, &offset, &count);

    for (size_t i = 0; i < count; i++) {
        size_t current_offset = offset;
        VALUE value = rb_io_buffer_get_value(base, size, buffer_type, &offset);

io_buffer_values(int argc, VALUE *argv, VALUE self)
    rb_io_buffer_get_bytes_for_reading(self, &base, &size);

    buffer_type = RB_IO_BUFFER_DATA_TYPE_U8;

    size_t offset, count;
    io_buffer_extract_offset_count(buffer_type, size, argc-1, argv+1, &offset, &count);

    VALUE array = rb_ary_new_capa(count);

    for (size_t i = 0; i < count; i++) {
        VALUE value = rb_io_buffer_get_value(base, size, buffer_type, &offset);
        rb_ary_push(array, value);
    }

io_buffer_each_byte(int argc, VALUE *argv, VALUE self)
    rb_io_buffer_get_bytes_for_reading(self, &base, &size);

    size_t offset, count;
    io_buffer_extract_offset_count(RB_IO_BUFFER_DATA_TYPE_U8, size, argc-1, argv+1, &offset, &count);

    for (size_t i = 0; i < count; i++) {
        unsigned char *value = (unsigned char *)base + i + offset;
rb_io_buffer_set_value(const void* base, size_t size, ID buffer_type, size_t *offset, VALUE value)
#define IO_BUFFER_SET_VALUE(name) if (buffer_type == RB_IO_BUFFER_DATA_TYPE_##name) {io_buffer_write_##name(base, size, offset, value); return;}
    IO_BUFFER_SET_VALUE(U8);
    IO_BUFFER_SET_VALUE(S8);

    IO_BUFFER_SET_VALUE(u16);
    IO_BUFFER_SET_VALUE(U16);
    IO_BUFFER_SET_VALUE(s16);
    IO_BUFFER_SET_VALUE(S16);

    IO_BUFFER_SET_VALUE(u32);
    IO_BUFFER_SET_VALUE(U32);
    IO_BUFFER_SET_VALUE(s32);
    IO_BUFFER_SET_VALUE(S32);

    IO_BUFFER_SET_VALUE(u64);
    IO_BUFFER_SET_VALUE(U64);
    IO_BUFFER_SET_VALUE(s64);
    IO_BUFFER_SET_VALUE(S64);

    IO_BUFFER_SET_VALUE(f32);
    IO_BUFFER_SET_VALUE(F32);
    IO_BUFFER_SET_VALUE(f64);
    IO_BUFFER_SET_VALUE(F64);
#undef IO_BUFFER_SET_VALUE

/* IO::Buffer#set_value: */
    size_t offset = io_buffer_extract_offset(_offset);

    rb_io_buffer_get_bytes_for_writing(self, &base, &size);

    rb_io_buffer_set_value(base, size, RB_SYM2ID(type), &offset, value);

/* IO::Buffer#set_values: */
    size_t offset = io_buffer_extract_offset(_offset);

    rb_io_buffer_get_bytes_for_writing(self, &base, &size);

    for (long i = 0; i < RARRAY_LEN(buffer_types); i++) {
        VALUE type = rb_ary_entry(buffer_types, i);
        VALUE value = rb_ary_entry(values, i);
        rb_io_buffer_set_value(base, size, RB_SYM2ID(type), &offset, value);
    }
io_buffer_memcpy(struct rb_io_buffer *buffer, size_t offset, const void *source_base, size_t source_offset, size_t source_size, size_t length)
    io_buffer_get_bytes_for_writing(buffer, &base, &size);

    io_buffer_validate_range(buffer, offset, length);

    if (source_offset + length > source_size) {

    memcpy((unsigned char*)base+offset, (unsigned char*)source_base+source_offset, length);

io_buffer_copy_from(struct rb_io_buffer *buffer, const void *source_base, size_t source_size, int argc, VALUE *argv)
    size_t source_offset;

    offset = io_buffer_extract_offset(argv[0]);

    source_offset = io_buffer_extract_offset(argv[2]);

    if (source_offset > source_size) {

    if (argc >= 2 && !RB_NIL_P(argv[1])) {
        length = io_buffer_extract_length(argv[1]);
    }
    else {
        length = source_size - source_offset;
    }

    io_buffer_memcpy(buffer, offset, source_base, source_offset, source_size, length);

rb_io_buffer_initialize_copy(VALUE self, VALUE source)
    const void *source_base;

    rb_io_buffer_get_bytes_for_reading(source, &source_base, &source_size);

    io_buffer_initialize(buffer, NULL, source_size, io_flags_for_size(source_size), Qnil);

    return io_buffer_copy_from(buffer, source_base, source_size, 0, NULL);
io_buffer_copy(int argc, VALUE *argv, VALUE self)
    VALUE source = argv[0];
    const void *source_base;

    rb_io_buffer_get_bytes_for_reading(source, &source_base, &source_size);

    return io_buffer_copy_from(buffer, source_base, source_size, argc-1, argv+1);

io_buffer_get_string(int argc, VALUE *argv, VALUE self)
    size_t offset, length;
    struct rb_io_buffer *buffer = io_buffer_extract_offset_length(self, argc, argv, &offset, &length);

    io_buffer_get_bytes_for_reading(buffer, &base, &size);

    if (argc >= 3 && !RB_NIL_P(argv[2])) {
        encoding = rb_find_encoding(argv[2]);
    }
    else {
        encoding = rb_ascii8bit_encoding();
    }

    io_buffer_validate_range(buffer, offset, length);

    return rb_enc_str_new((const char*)base + offset, length, encoding);

io_buffer_set_string(int argc, VALUE *argv, VALUE self)
    return io_buffer_copy_from(buffer, source_base, source_size, argc-1, argv+1);
rb_io_buffer_clear(VALUE self, uint8_t value, size_t offset, size_t length)
    io_buffer_get_bytes_for_writing(buffer, &base, &size);

    io_buffer_validate_range(buffer, offset, length);

    memset((char*)base + offset, value, length);

io_buffer_clear(int argc, VALUE *argv, VALUE self)
    size_t offset, length;
    io_buffer_extract_offset_length(self, argc-1, argv+1, &offset, &length);

    rb_io_buffer_clear(self, value, offset, length);
io_buffer_default_size(size_t page_size)
    const size_t platform_agnostic_default_size = 64*1024;

    /* Allow the default to be overridden from the environment: */
    const char *default_size = getenv("RUBY_IO_BUFFER_DEFAULT_SIZE");
        int value = atoi(default_size);

    /* Otherwise, never default to less than one page: */
    if (platform_agnostic_default_size < page_size) {
        return page_size;
    }

    return platform_agnostic_default_size;
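/*
 * Illustrative note (not part of the original source): with a typical 4 KiB
 * page the default buffer size stays at 64 KiB; on platforms whose page size
 * exceeds 64 KiB, the page size wins; and a positive value parsed from the
 * RUBY_IO_BUFFER_DEFAULT_SIZE environment variable is used instead of the
 * computed default.
 */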
/* From the blocking-region argument struct: */
    rb_blocking_function_t *function;

io_buffer_blocking_region_begin(VALUE _argument)
    return rb_thread_io_blocking_region(argument->function, argument->data, argument->descriptor);

io_buffer_blocking_region_ensure(VALUE _argument)
    io_buffer_unlock(argument->buffer);

io_buffer_blocking_region(struct rb_io_buffer *buffer, rb_blocking_function_t *function, void *data, int descriptor)
        .function = function,
        .descriptor = descriptor,

    if (buffer->flags & RB_IO_BUFFER_LOCKED) {
        return io_buffer_blocking_region_begin((VALUE)&argument);
    }
    else {
        io_buffer_lock(buffer);

        return rb_ensure(io_buffer_blocking_region_begin, (VALUE)&argument, io_buffer_blocking_region_ensure, (VALUE)&argument);
    }
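/*
 * Illustrative note (not part of the original source): io_buffer_blocking_region
 * pairs io_buffer_lock with io_buffer_unlock through rb_ensure, so the buffer is
 * always unlocked after the blocking system call even if it raises. When the
 * caller already holds the lock, the region runs directly and the existing lock
 * is left untouched.
 */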
io_buffer_read_internal(void *_argument)
    ssize_t result = read(argument->descriptor, argument->base, argument->size);

    else if (result == 0) {

    if (total >= argument->length) {

    argument->base = argument->base + result;
    argument->size = argument->size - result;
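/*
 * Illustrative note (not part of the original source): read(2) may return
 * fewer bytes than requested, so the loop above accumulates a running total,
 * advances base/size by each partial result, and stops once `length` bytes
 * have been read, end-of-file is reported (result == 0), or an error occurs.
 */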
rb_io_buffer_read(VALUE self, VALUE io, size_t length, size_t offset)
    if (scheduler != Qnil) {
        if (!UNDEF_P(result)) {

    io_buffer_validate_range(buffer, offset, length);

    io_buffer_get_bytes_for_writing(buffer, &base, &size);

    base = (unsigned char*)base + offset;

        .descriptor = descriptor,

    return io_buffer_blocking_region(buffer, io_buffer_read_internal, &argument, descriptor);

io_buffer_read(int argc, VALUE *argv, VALUE self)
    size_t length, offset;
    io_buffer_extract_length_offset(self, argc-1, argv+1, &length, &offset);

    return rb_io_buffer_read(self, io, length, offset);
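/*
 * Illustrative note (not part of the original source): each of the read,
 * pread, write and pwrite entry points first offers the operation to the
 * current fiber scheduler (rb_fiber_scheduler_current and the scheduler's
 * io_read/io_pread/io_write/io_pwrite hooks); only when no scheduler handles
 * it does the code fall back to the blocking-region path around the raw
 * system call.
 */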
io_buffer_pread_internal(void *_argument)
#if defined(HAVE_PREAD)
    ssize_t result = pread(argument->descriptor, argument->base, argument->size, argument->offset);
#else
    rb_off_t offset = lseek(argument->descriptor, 0, SEEK_CUR);
    if (offset == (rb_off_t)-1)

    if (lseek(argument->descriptor, argument->offset, SEEK_SET) == (rb_off_t)-1)

    ssize_t result = read(argument->descriptor, argument->base, argument->size);

    if (lseek(argument->descriptor, offset, SEEK_SET) == (rb_off_t)-1)
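/*
 * Illustrative note (not part of the original source): when pread(2) is not
 * available, the fallback above emulates it: remember the current file
 * position, seek to the requested offset, read, then seek back. Unlike a real
 * pread, this is not atomic with respect to other users of the descriptor.
 */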
rb_io_buffer_pread(VALUE self, VALUE io, rb_off_t from, size_t length, size_t offset)
    if (scheduler != Qnil) {
        if (!UNDEF_P(result)) {

    io_buffer_validate_range(buffer, offset, length);

    io_buffer_get_bytes_for_writing(buffer, &base, &size);

        .descriptor = descriptor,
        .base = (unsigned char*)base + offset,

    return io_buffer_blocking_region(buffer, io_buffer_pread_internal, &argument, descriptor);

io_buffer_pread(int argc, VALUE *argv, VALUE self)
    size_t length, offset;
    io_buffer_extract_length_offset(self, argc-2, argv+2, &length, &offset);

    return rb_io_buffer_pread(self, io, from, length, offset);
io_buffer_write_internal(void *_argument)
    ssize_t result = write(argument->descriptor, argument->base, argument->size);

    else if (result == 0) {

    if (total >= argument->length) {

    argument->base = argument->base + result;
    argument->size = argument->size - result;

rb_io_buffer_write(VALUE self, VALUE io, size_t length, size_t offset)
    if (scheduler != Qnil) {
        if (!UNDEF_P(result)) {

    io_buffer_validate_range(buffer, offset, length);

    io_buffer_get_bytes_for_reading(buffer, &base, &size);

    base = (unsigned char *)base + offset;

        .descriptor = descriptor,

    return io_buffer_blocking_region(buffer, io_buffer_write_internal, &argument, descriptor);

io_buffer_write(int argc, VALUE *argv, VALUE self)
    size_t length, offset;
    io_buffer_extract_length_offset(self, argc-1, argv+1, &length, &offset);

    return rb_io_buffer_write(self, io, length, offset);
io_buffer_pwrite_internal(void *_argument)
#if defined(HAVE_PWRITE)
    ssize_t result = pwrite(argument->descriptor, argument->base, argument->size, argument->offset);
#else
    rb_off_t offset = lseek(argument->descriptor, 0, SEEK_CUR);
    if (offset == (rb_off_t)-1)

    if (lseek(argument->descriptor, argument->offset, SEEK_SET) == (rb_off_t)-1)

    ssize_t result = write(argument->descriptor, argument->base, argument->size);

    if (lseek(argument->descriptor, offset, SEEK_SET) == (rb_off_t)-1)

rb_io_buffer_pwrite(VALUE self, VALUE io, rb_off_t from, size_t length, size_t offset)
    if (scheduler != Qnil) {
        if (!UNDEF_P(result)) {

    io_buffer_validate_range(buffer, offset, length);

    io_buffer_get_bytes_for_reading(buffer, &base, &size);

        .descriptor = descriptor,
        .base = (unsigned char *)base + offset,

    return io_buffer_blocking_region(buffer, io_buffer_pwrite_internal, &argument, descriptor);

io_buffer_pwrite(int argc, VALUE *argv, VALUE self)
    size_t length, offset;
    io_buffer_extract_length_offset(self, argc-2, argv+2, &length, &offset);

    return rb_io_buffer_pwrite(self, io, from, length, offset);
/* io_buffer_check_mask: */
    if (buffer->size == 0)
        rb_raise(rb_eIOBufferMaskError, "Zero-length mask given!");

memory_and(unsigned char * restrict output, unsigned char * restrict base, size_t size, unsigned char * restrict mask, size_t mask_size)
    for (size_t offset = 0; offset < size; offset += 1) {
        output[offset] = base[offset] & mask[offset % mask_size];
    }
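/*
 * Illustrative example (not part of the original source): the mask repeats
 * via offset % mask_size. AND-ing the bytes {0xFF, 0x0F, 0xF0} with the
 * two-byte mask {0x0F, 0xF0} therefore yields {0x0F, 0x00, 0x00}.
 */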
    io_buffer_check_mask(mask_buffer);

    VALUE output = rb_io_buffer_new(NULL, buffer->size, io_flags_for_size(buffer->size));

    memory_and(output_buffer->base, buffer->base, buffer->size, mask_buffer->base, mask_buffer->size);
memory_or(unsigned char * restrict output, unsigned char * restrict base, size_t size, unsigned char * restrict mask, size_t mask_size)
    for (size_t offset = 0; offset < size; offset += 1) {
        output[offset] = base[offset] | mask[offset % mask_size];
    }

    io_buffer_check_mask(mask_buffer);

    VALUE output = rb_io_buffer_new(NULL, buffer->size, io_flags_for_size(buffer->size));

    memory_or(output_buffer->base, buffer->base, buffer->size, mask_buffer->base, mask_buffer->size);
memory_xor(unsigned char * restrict output, unsigned char * restrict base, size_t size, unsigned char * restrict mask, size_t mask_size)
    for (size_t offset = 0; offset < size; offset += 1) {
        output[offset] = base[offset] ^ mask[offset % mask_size];
    }

    io_buffer_check_mask(mask_buffer);

    VALUE output = rb_io_buffer_new(NULL, buffer->size, io_flags_for_size(buffer->size));

    memory_xor(output_buffer->base, buffer->base, buffer->size, mask_buffer->base, mask_buffer->size);

memory_not(unsigned char * restrict output, unsigned char * restrict base, size_t size)
    for (size_t offset = 0; offset < size; offset += 1) {
        output[offset] = ~base[offset];
    }

io_buffer_not(VALUE self)
    VALUE output = rb_io_buffer_new(NULL, buffer->size, io_flags_for_size(buffer->size));

    memory_not(output_buffer->base, buffer->base, buffer->size);
/* io_buffer_overlaps: */
    if (a->base > b->base) {
        return io_buffer_overlaps(b, a);
    }

    return (b->base >= a->base) && (b->base <= (void*)((unsigned char *)a->base + a->size));

/* io_buffer_check_overlaps: */
    if (io_buffer_overlaps(a, b))
        rb_raise(rb_eIOBufferMaskError, "Mask overlaps source buffer!");
memory_and_inplace(unsigned char * restrict base, size_t size, unsigned char * restrict mask, size_t mask_size)
    for (size_t offset = 0; offset < size; offset += 1) {
        base[offset] &= mask[offset % mask_size];
    }

    io_buffer_check_mask(mask_buffer);
    io_buffer_check_overlaps(buffer, mask_buffer);

    io_buffer_get_bytes_for_writing(buffer, &base, &size);

    memory_and_inplace(base, size, mask_buffer->base, mask_buffer->size);
memory_or_inplace(unsigned char * restrict base, size_t size, unsigned char * restrict mask, size_t mask_size)
    for (size_t offset = 0; offset < size; offset += 1) {
        base[offset] |= mask[offset % mask_size];
    }

    io_buffer_check_mask(mask_buffer);
    io_buffer_check_overlaps(buffer, mask_buffer);

    io_buffer_get_bytes_for_writing(buffer, &base, &size);

    memory_or_inplace(base, size, mask_buffer->base, mask_buffer->size);

memory_xor_inplace(unsigned char * restrict base, size_t size, unsigned char * restrict mask, size_t mask_size)
    for (size_t offset = 0; offset < size; offset += 1) {
        base[offset] ^= mask[offset % mask_size];
    }

    io_buffer_check_mask(mask_buffer);
    io_buffer_check_overlaps(buffer, mask_buffer);

    io_buffer_get_bytes_for_writing(buffer, &base, &size);

    memory_xor_inplace(base, size, mask_buffer->base, mask_buffer->size);
memory_not_inplace(unsigned char * restrict base, size_t size)
    for (size_t offset = 0; offset < size; offset += 1) {
        base[offset] = ~base[offset];
    }

io_buffer_not_inplace(VALUE self)
    io_buffer_get_bytes_for_writing(buffer, &base, &size);

    memory_not_inplace(base, size);
/* From Init_IO_Buffer: */
#if defined(_WIN32)
    SYSTEM_INFO info;
    GetSystemInfo(&info);
    RUBY_IO_BUFFER_PAGE_SIZE = info.dwPageSize;
#else
    RUBY_IO_BUFFER_PAGE_SIZE = sysconf(_SC_PAGESIZE);
#endif

    RUBY_IO_BUFFER_DEFAULT_SIZE = io_buffer_default_size(RUBY_IO_BUFFER_PAGE_SIZE);

    rb_define_method(rb_cIOBuffer, "initialize_copy", rb_io_buffer_initialize_copy, 1);
#define IO_BUFFER_DEFINE_DATA_TYPE(name) RB_IO_BUFFER_DATA_TYPE_##name = rb_intern_const(#name)
    IO_BUFFER_DEFINE_DATA_TYPE(U8);
    IO_BUFFER_DEFINE_DATA_TYPE(S8);

    IO_BUFFER_DEFINE_DATA_TYPE(u16);
    IO_BUFFER_DEFINE_DATA_TYPE(U16);
    IO_BUFFER_DEFINE_DATA_TYPE(s16);
    IO_BUFFER_DEFINE_DATA_TYPE(S16);

    IO_BUFFER_DEFINE_DATA_TYPE(u32);
    IO_BUFFER_DEFINE_DATA_TYPE(U32);
    IO_BUFFER_DEFINE_DATA_TYPE(s32);
    IO_BUFFER_DEFINE_DATA_TYPE(S32);

    IO_BUFFER_DEFINE_DATA_TYPE(u64);
    IO_BUFFER_DEFINE_DATA_TYPE(U64);
    IO_BUFFER_DEFINE_DATA_TYPE(s64);
    IO_BUFFER_DEFINE_DATA_TYPE(S64);

    IO_BUFFER_DEFINE_DATA_TYPE(f32);
    IO_BUFFER_DEFINE_DATA_TYPE(F32);
    IO_BUFFER_DEFINE_DATA_TYPE(f64);
    IO_BUFFER_DEFINE_DATA_TYPE(F64);
#undef IO_BUFFER_DEFINE_DATA_TYPE