gguf : add support for I64 and F64 arrays (#6062)
* gguf : add support for I64 and F64 arrays

GGML currently does not support I64 or F64 arrays, and they are not often
used in machine learning. However, in case the need arises in the future,
it is worth adding them now, so that the types sit next to the existing
I8, I16, and I32 entries in the enums and their type numbers are reserved.

Furthermore, with this addition the GGUF format becomes usable for most
computational applications of NumPy (it is now compatible with the most
common NumPy dtypes: i8, i16, i32, i64, f32, and f64), providing a faster
and more versatile alternative to the `npz` format, and a simpler
alternative to the `hdf5` format.

The change in this PR is small and should not significantly increase the
maintenance burden. I tested it from Python using GGUFWriter/GGUFReader
and `gguf-dump`, as well as from C; everything seems to work (a round-trip
sketch follows the change summary below).

* Fix compiler warnings
certik authored Mar 15, 2024
1 parent aab606a commit 7ce2c77
Showing 5 changed files with 40 additions and 7 deletions.
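
For concreteness, a minimal round-trip sketch of the Python test mentioned in the commit message, assuming the `gguf` package from this repository; the file name, tensor names, and `arch` string are placeholders:

```python
import numpy as np
from gguf import GGUFWriter

# Write one array of each newly supported type
# ("arrays.gguf" and arch="example" are placeholders).
writer = GGUFWriter("arrays.gguf", arch="example")
writer.add_tensor("ints", np.arange(8, dtype=np.int64))
writer.add_tensor("floats", np.linspace(0.0, 1.0, 8, dtype=np.float64))
writer.write_header_to_file()
writer.write_kv_data_to_file()
writer.write_tensors_to_file()
writer.close()
```

The resulting file can then be inspected with `gguf-dump arrays.gguf` or read back with `GGUFReader` (see the reader sketch further down).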
17 changes: 17 additions & 0 deletions ggml.c
@@ -470,6 +470,19 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
         .type_size = sizeof(int32_t),
         .is_quantized = false,
     },
+    [GGML_TYPE_I64] = {
+        .type_name = "i64",
+        .blck_size = 1,
+        .type_size = sizeof(int64_t),
+        .is_quantized = false,
+    },
+    [GGML_TYPE_F64] = {
+        .type_name = "f64",
+        .blck_size = 1,
+        .type_size = sizeof(double),
+        .is_quantized = false,
+        .nrows = 1,
+    },
     [GGML_TYPE_F32] = {
         .type_name = "f32",
         .blck_size = 1,
@@ -12418,6 +12431,8 @@ static void ggml_compute_forward_alibi(
         case GGML_TYPE_I8:
         case GGML_TYPE_I16:
         case GGML_TYPE_I32:
+        case GGML_TYPE_I64:
+        case GGML_TYPE_F64:
         case GGML_TYPE_COUNT:
             {
                 GGML_ASSERT(false);
@@ -12504,6 +12519,8 @@ static void ggml_compute_forward_clamp(
         case GGML_TYPE_I8:
         case GGML_TYPE_I16:
         case GGML_TYPE_I32:
+        case GGML_TYPE_I64:
+        case GGML_TYPE_F64:
         case GGML_TYPE_COUNT:
             {
                 GGML_ASSERT(false);
2 changes: 2 additions & 0 deletions ggml.h
@@ -366,6 +366,8 @@ extern "C" {
         GGML_TYPE_I8  = 24,
         GGML_TYPE_I16 = 25,
         GGML_TYPE_I32 = 26,
+        GGML_TYPE_I64 = 27,
+        GGML_TYPE_F64 = 28,
         GGML_TYPE_COUNT,
     };

4 changes: 4 additions & 0 deletions gguf-py/gguf/constants.py
@@ -665,6 +665,8 @@ class GGMLQuantizationType(IntEnum):
     I8  = 24
     I16 = 25
     I32 = 26
+    I64 = 27
+    F64 = 28


 class GGUFEndian(IntEnum):
@@ -734,6 +736,8 @@ def get_type(val: Any) -> GGUFValueType:
     GGMLQuantizationType.I8: (1, 1),
     GGMLQuantizationType.I16: (1, 2),
     GGMLQuantizationType.I32: (1, 4),
+    GGMLQuantizationType.I64: (1, 8),
+    GGMLQuantizationType.F64: (1, 8),
 }


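The two new rows in this size table make the element sizes queryable from Python as well. A small sketch of how the reader-side byte count falls out of it, assuming this is the `GGML_QUANT_SIZES` table from `gguf-py/gguf/constants.py`:

```python
from gguf.constants import GGML_QUANT_SIZES, GGMLQuantizationType

# Each entry is (block_size, type_size); the plain (non-quantized) types
# use block_size 1, so bytes scale linearly with the element count.
block_size, type_size = GGML_QUANT_SIZES[GGMLQuantizationType.F64]
n_elems = 1000
n_bytes = n_elems * type_size // block_size  # 1000 * 8 // 1 = 8000
```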
12 changes: 9 additions & 3 deletions gguf-py/gguf/gguf_reader.py
@@ -242,12 +242,15 @@ def _build_tensors(self, start_offs: int, fields: list[ReaderField]) -> None:
             n_bytes = n_elems * type_size // block_size
             data_offs = int(start_offs + offset_tensor[0])
             item_type: npt.DTypeLike
-            if ggml_type == GGMLQuantizationType.F32:
+            if ggml_type == GGMLQuantizationType.F16:
+                item_count = n_elems
+                item_type = np.float16
+            elif ggml_type == GGMLQuantizationType.F32:
                 item_count = n_elems
                 item_type = np.float32
-            elif ggml_type == GGMLQuantizationType.F16:
+            elif ggml_type == GGMLQuantizationType.F64:
                 item_count = n_elems
-                item_type = np.float16
+                item_type = np.float64
             elif ggml_type == GGMLQuantizationType.I8:
                 item_count = n_elems
                 item_type = np.int8
@@ -257,6 +260,9 @@ def _build_tensors(self, start_offs: int, fields: list[ReaderField]) -> None:
             elif ggml_type == GGMLQuantizationType.I32:
                 item_count = n_elems
                 item_type = np.int32
+            elif ggml_type == GGMLQuantizationType.I64:
+                item_count = n_elems
+                item_type = np.int64
             else:
                 item_count = n_bytes
                 item_type = np.uint8
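With the dispatch above, I64 and F64 tensors are exposed as typed NumPy views, while types without a native dtype still fall back to a raw uint8 view of `n_bytes`. A short sketch of inspecting a file (the path is a placeholder):

```python
from gguf import GGUFReader

reader = GGUFReader("arrays.gguf")  # placeholder path
for tensor in reader.tensors:
    # F16/F32/F64 and I8/I16/I32/I64 map to native NumPy dtypes;
    # any other type is exposed as raw uint8 bytes.
    print(tensor.name, tensor.tensor_type.name, tensor.data.dtype)
```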
12 changes: 8 additions & 4 deletions gguf-py/gguf/gguf_writer.py
@@ -204,18 +204,22 @@ def add_tensor_info(
         for i in range(n_dims):
             self.ti_data += self._pack("Q", tensor_shape[n_dims - 1 - i])
         if raw_dtype is None:
-            if tensor_dtype == np.float32:
-                dtype = GGMLQuantizationType.F32
-            elif tensor_dtype == np.float16:
+            if tensor_dtype == np.float16:
                 dtype = GGMLQuantizationType.F16
+            elif tensor_dtype == np.float32:
+                dtype = GGMLQuantizationType.F32
+            elif tensor_dtype == np.float64:
+                dtype = GGMLQuantizationType.F64
             elif tensor_dtype == np.int8:
                 dtype = GGMLQuantizationType.I8
             elif tensor_dtype == np.int16:
                 dtype = GGMLQuantizationType.I16
             elif tensor_dtype == np.int32:
                 dtype = GGMLQuantizationType.I32
+            elif tensor_dtype == np.int64:
+                dtype = GGMLQuantizationType.I64
             else:
-                raise ValueError("Only F32, F16, I8, I16, I32 tensors are supported for now")
+                raise ValueError("Only F16, F32, F64, I8, I16, I32, I64 tensors are supported for now")
         else:
             dtype = raw_dtype
         self.ti_data += self._pack("I", dtype)
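On the writer side, `np.int64` and `np.float64` inputs are now picked up by the detection chain above, while the existing `raw_dtype` parameter remains the explicit override for anything outside it. A minimal sketch (path, arch, and tensor names are placeholders):

```python
import numpy as np
from gguf import GGUFWriter
from gguf.constants import GGMLQuantizationType

writer = GGUFWriter("out.gguf", arch="example")
writer.add_tensor("a", np.zeros(4, dtype=np.int64))    # detected as I64
writer.add_tensor("b", np.zeros(4, dtype=np.float64))  # detected as F64
# raw_dtype bypasses detection, e.g. for data already laid out as a GGML type:
writer.add_tensor("c", np.zeros(4, dtype=np.float32),
                  raw_dtype=GGMLQuantizationType.F32)
writer.write_header_to_file()
writer.write_kv_data_to_file()
writer.write_tensors_to_file()
writer.close()
```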
