From 3fb801c36bf1d56f4c8f9648aa6e7376e60c7ed9 Mon Sep 17 00:00:00 2001
From: Christian Stoeckl
Date: Mon, 20 Jan 2025 16:32:36 -0500
Subject: [PATCH] Initial write of chunked dataset + compression

---
 jhdf/src/main/java/io/jhdf/BufferBuilder.java |  35 ++++
 jhdf/src/main/java/io/jhdf/ObjectHeader.java  | 104 ++++++++++--
 jhdf/src/main/java/io/jhdf/Superblock.java    |  52 +++++-
 jhdf/src/main/java/io/jhdf/Utils.java         |  22 +--
 .../java/io/jhdf/WritableDatasetImpl.java     | 156 ++++++++++++++----
 .../main/java/io/jhdf/WritableGroupImpl.java  |  18 +-
 .../main/java/io/jhdf/WritableHdfFile.java    |  29 +++-
 .../main/java/io/jhdf/api/WritableGroup.java  |   3 +
 .../java/io/jhdf/api/WritiableDataset.java    |   2 +
 jhdf/src/main/java/io/jhdf/btree/BTreeV1.java |  27 ++-
 .../main/java/io/jhdf/btree/BTreeV1Data.java  |  54 +++++-
 .../dataset/chunked/ChunkedDatasetV4.java     |   6 +-
 .../io/jhdf/filter/DeflatePipelineFilter.java |  28 ++++
 jhdf/src/main/java/io/jhdf/filter/Filter.java |   2 +
 .../java/io/jhdf/filter/FilterManager.java    |  12 +-
 .../io/jhdf/object/datatype/DataType.java     |   2 +-
 .../io/jhdf/object/datatype/FixedPoint.java   |  11 +-
 .../java/io/jhdf/object/datatype/UFixed.java  |  18 ++
 .../object/message/DataLayoutMessage.java     |  34 ++--
 .../io/jhdf/object/message/DataSpace.java     |  67 +++++---
 .../jhdf/object/message/DataSpaceMessage.java |   2 +-
 .../jhdf/object/message/DataTypeMessage.java  |   2 +-
 .../object/message/FilterPipelineMessage.java |  36 +++-
 .../jhdf/object/message/GroupInfoMessage.java |   3 +-
 .../jhdf/object/message/LinkInfoMessage.java  |   8 +-
 25 files changed, 615 insertions(+), 118 deletions(-)
 create mode 100644 jhdf/src/main/java/io/jhdf/object/datatype/UFixed.java

diff --git a/jhdf/src/main/java/io/jhdf/BufferBuilder.java b/jhdf/src/main/java/io/jhdf/BufferBuilder.java
index b177a413..3f2b075a 100644
--- a/jhdf/src/main/java/io/jhdf/BufferBuilder.java
+++ b/jhdf/src/main/java/io/jhdf/BufferBuilder.java
@@ -80,6 +80,23 @@ public BufferBuilder writeInt(int i) {
 		}
 	}
 
+	public BufferBuilder writeInts(int[] ints) {
+		int i = 0, j = 0;
+		try {
+			for (j = 0; j < ints.length; j++) {
+				if (BYTE_ORDER == LITTLE_ENDIAN) {
+					i = Integer.reverseBytes(ints[j]);
+				} else {
+					i = ints[j];
+				}
+				dataOutputStream.writeInt(i);
+			}
+			return this;
+		} catch (IOException e) {
+			throw new BufferBuilderException(e);
+		}
+	}
+
 	public BufferBuilder writeLong(long l) {
 		try {
 			if(BYTE_ORDER == LITTLE_ENDIAN) {
@@ -92,6 +109,24 @@ public BufferBuilder writeLong(long l) {
 		}
 	}
 
+	public BufferBuilder writeLongs(long[] longs) {
+		long l = 0;
+		int j = 0;
+		try {
+			for (j = 0; j < longs.length; j++) {
+				if (BYTE_ORDER == LITTLE_ENDIAN) {
+					l = Long.reverseBytes(longs[j]);
+				} else {
+					l = longs[j];
+				}
+				dataOutputStream.writeLong(l);
+			}
+			return this;
+		} catch (IOException e) {
+			throw new BufferBuilderException(e);
+		}
+	}
+
 	public ByteBuffer build() {
 		try {
 			ByteBuffer byteBuffer = ByteBuffer.wrap(byteArrayOutputStream.toByteArray());
diff --git a/jhdf/src/main/java/io/jhdf/ObjectHeader.java b/jhdf/src/main/java/io/jhdf/ObjectHeader.java
index f8f846bb..115b1ba7 100644
--- a/jhdf/src/main/java/io/jhdf/ObjectHeader.java
+++ b/jhdf/src/main/java/io/jhdf/ObjectHeader.java
@@ -91,6 +91,25 @@ public static class ObjectHeaderV1 extends ObjectHeader {
 		 */
 		private final int referenceCount;
 
+		public ObjectHeaderV1(long address, List<Message> messages) {
+			super(address);
+			this.messages.addAll(messages);
+
+			version = 1;
+			referenceCount = 0;
+
+			// accessTime = -1;
+			// modificationTime = -1;
+			// changeTime = -1;
+			// birthTime = -1;
+
+			// maximumNumberOfCompactAttributes = -1;
+			// maximumNumberOfDenseAttributes = -1;
+
+			// flags = new BitSet(8); // TODO make consistent with values
+			// flags.set(1); // Make sizeOfChunk0 4 bytes.
+		}
+
 		private ObjectHeaderV1(HdfBackingStorage hdfBackingStorage, long address) {
 			super(address);
 
@@ -136,7 +155,8 @@ private void readMessages(HdfBackingStorage hdfBackingStorage, ByteBuffer bb, in
 			if (m instanceof ObjectHeaderContinuationMessage) {
 				ObjectHeaderContinuationMessage ohcm = (ObjectHeaderContinuationMessage) m;
-				ByteBuffer continuationBuffer = hdfBackingStorage.readBufferFromAddress(ohcm.getOffset(), ohcm.getLength());
+				ByteBuffer continuationBuffer = hdfBackingStorage.readBufferFromAddress(ohcm.getOffset(),
+						ohcm.getLength());
 				readMessages(hdfBackingStorage, continuationBuffer, numberOfMessages);
 			}
 
@@ -162,6 +182,46 @@ public boolean isAttributeCreationOrderIndexed() {
 			return false; // Not supported in v1 headers
 		}
 
+		public ByteBuffer toBuffer() {
+
+			// Start messages
+			ByteBuffer messagesBuffer = messagesToBuffer();
+
+			// finish buffer
+			BufferBuilder bufferBuilder = new BufferBuilder()
+				.writeByte(version)
+				.writeByte(0) // reserved
+				.writeShort(messages.size())
+				.writeInt(1) // obj. reference count
+				.writeInt(messagesBuffer.capacity())
+				.writeInt(0) // reserved
+				.writeBuffer(messagesBuffer);
+
+			return bufferBuilder.build();
+		}
+
+		private ByteBuffer messagesToBuffer() {
+			BufferBuilder bufferBuilder = new BufferBuilder();
+			for (Message message : messages) {
+				final ByteBuffer messageBuffer = message.toBuffer();
+				int length = messageBuffer.capacity();
+				if (message.getMessageType() != 1) {
+					length = length + (8 - (length % 8)); // extend to next 8 byte boundary
+				}
+				bufferBuilder.writeShort(message.getMessageType())
+					.writeShort(length)
+					.writeBytes(message.flagsToBytes())
+					.writeByte(0) // padding
+					.writeByte(0) // padding
+					.writeByte(0) // padding
+					.writeBuffer(messageBuffer);
+				for (int i = messageBuffer.capacity(); i < length; i++) {
+					bufferBuilder.writeByte(0);
+				}
+			}
+			return bufferBuilder.build();
+		}
+
 		private ObjectHeaderV1(HdfBackingStorage hdfBackingStorage, long address) {
@@ -311,9 +372,9 @@ public ObjectHeaderV2(long address, List<Message> messages) {
 		public ByteBuffer toBuffer() {
 			BufferBuilder bufferBuilder = new BufferBuilder()
-				.writeBytes(OBJECT_HEADER_V2_SIGNATURE)
-				.writeByte(version)
-				.writeBitSet(flags, 1);
+					.writeBytes(OBJECT_HEADER_V2_SIGNATURE)
+					.writeByte(version)
+					.writeBitSet(flags, 1);
 
 			if (flags.get(TIMESTAMPS_PRESENT)) {
 				bufferBuilder.writeInt((int) accessTime);
@@ -327,7 +388,7 @@ public ByteBuffer toBuffer() {
 				bufferBuilder.writeInt((int) birthTime);
 			}
 
-			if(flags.get(NUMBER_OF_ATTRIBUTES_PRESENT)) {
+			if (flags.get(NUMBER_OF_ATTRIBUTES_PRESENT)) {
 				// TODO min/max attributes
 				throw new UnsupportedHdfException("Writing number of attributes");
 			}
@@ -344,29 +405,41 @@ private ByteBuffer messagesToBuffer() {
 			BufferBuilder bufferBuilder = new BufferBuilder();
 			for (Message message : messages) {
 				final ByteBuffer messageBuffer = message.toBuffer();
-				bufferBuilder.writeByte(message.getMessageType())
-					.writeShort(messageBuffer.capacity())
-					.writeBytes(message.flagsToBytes())
-					.writeBuffer(messageBuffer);
+				if (message.getMessageType() == 8) {
+					bufferBuilder.writeByte(message.getMessageType())
+						.writeShort(messageBuffer.capacity())
+						.writeBytes(message.flagsToBytes())
+						// .writeByte(0) // padding
+						// .writeByte(0) // padding
+						// .writeByte(0) // padding
+						.writeBuffer(messageBuffer);
+				} else {
+					bufferBuilder.writeByte(message.getMessageType())
+						.writeShort(messageBuffer.capacity())
+						.writeBytes(message.flagsToBytes())
+						.writeBuffer(messageBuffer);
+				}
 			}
 			return bufferBuilder.build();
 		}
 
 		private void readMessages(HdfBackingStorage hdfBackingStorage, ByteBuffer bb) {
 			while (bb.remaining() >= 8) {
-				Message m = Message.readObjectHeaderV2Message(bb, hdfBackingStorage, this.isAttributeCreationOrderTracked());
+				Message m = Message.readObjectHeaderV2Message(bb, hdfBackingStorage,
+						this.isAttributeCreationOrderTracked());
 				messages.add(m);
 
 				if (m instanceof ObjectHeaderContinuationMessage) {
 					ObjectHeaderContinuationMessage ohcm = (ObjectHeaderContinuationMessage) m;
-					ByteBuffer continuationBuffer = hdfBackingStorage.readBufferFromAddress(ohcm.getOffset(), ohcm.getLength());
+					ByteBuffer continuationBuffer = hdfBackingStorage.readBufferFromAddress(ohcm.getOffset(),
+							ohcm.getLength());
 
 					// Verify continuation block signature
 					byte[] continuationSignatureBytes = new byte[OBJECT_HEADER_V2_CONTINUATION_SIGNATURE.length];
 					continuationBuffer.get(continuationSignatureBytes);
 					if (!Arrays.equals(OBJECT_HEADER_V2_CONTINUATION_SIGNATURE, continuationSignatureBytes)) {
 						throw new HdfException(
-							"Object header continuation header not matched, at address: " + ohcm.getOffset());
+								"Object header continuation header not matched, at address: " + ohcm.getOffset());
 					}
 
 					// Recursively read messages
@@ -429,7 +502,8 @@ public static ObjectHeader readObjectHeader(HdfBackingStorage hdfBackingStorage,
 		}
 	}
 
-	public static LazyInitializer<ObjectHeader> lazyReadObjectHeader(HdfBackingStorage hdfBackingStorage, long address) {
+	public static LazyInitializer<ObjectHeader> lazyReadObjectHeader(HdfBackingStorage hdfBackingStorage,
+			long address) {
 		logger.debug("Creating lazy object header at address: {}", address);
 		return new LazyInitializer<ObjectHeader>() {
diff --git a/jhdf/src/main/java/io/jhdf/Superblock.java b/jhdf/src/main/java/io/jhdf/Superblock.java
index 0be12ae8..11db7ef0 100644
--- a/jhdf/src/main/java/io/jhdf/Superblock.java
+++ b/jhdf/src/main/java/io/jhdf/Superblock.java
@@ -160,7 +160,28 @@ public static class SuperblockV0V1 extends Superblock {
 		private final long addressOfGlobalFreeSpaceIndex;
 		private final long endOfFileAddress;
 		private final long driverInformationBlockAddress;
+		private final long rootLinkNameAddress = 0L;
 		private final long rootGroupSymbolTableAddress;
+		public static final long ROOT_GROUP_ADDRESS = 0x60;
+
+
+		public SuperblockV0V1() {
+			versionOfSuperblock = 0;
+			versionNumberOfTheFileFreeSpaceInformation = 0;
+			versionOfRootGroupSymbolTableEntry = 0;
+			versionOfSharedHeaderMessageFormat = 0;
+			sizeOfOffsets = 8;
+			sizeOfLengths = 8;
+			groupLeafNodeK = 4;
+			groupInternalNodeK = 16;
+			baseAddressByte = 0L;
+			addressOfGlobalFreeSpaceIndex = Constants.UNDEFINED_ADDRESS;
+			endOfFileAddress = Constants.UNDEFINED_ADDRESS;
+			driverInformationBlockAddress = Constants.UNDEFINED_ADDRESS;
+
+			rootGroupSymbolTableAddress = ROOT_GROUP_ADDRESS;
+		}
+
 		private SuperblockV0V1(FileChannel fc, long address) {
 			try {
 
@@ -426,6 +447,35 @@ public long getDriverInformationBlockAddress() {
 		public long getRootGroupSymbolTableAddress() {
 			return rootGroupSymbolTableAddress;
 		}
+
+		public ByteBuffer toBuffer(long endOfFileAddress) {
+
+			BufferBuilder bufferBuilder = new BufferBuilder()
+				.writeBytes(HDF5_FILE_SIGNATURE)
+				.writeByte(versionOfSuperblock)
+				.writeByte(versionNumberOfTheFileFreeSpaceInformation)
+				.writeByte(versionOfRootGroupSymbolTableEntry)
+				.writeByte(0) // Reserved
+				.writeByte(versionOfSharedHeaderMessageFormat)
+				.writeByte(sizeOfOffsets)
+				.writeByte(sizeOfLengths)
+				.writeByte(0) // Reserved
+				.writeShort(groupLeafNodeK)
+				.writeShort(groupInternalNodeK)
+				.writeInt(0) // Flags
+				.writeLong(baseAddressByte)
+				.writeLong(addressOfGlobalFreeSpaceIndex)
+				.writeLong(endOfFileAddress)
+				.writeLong(driverInformationBlockAddress)
+				.writeLong(rootLinkNameAddress)
+				.writeLong(rootGroupSymbolTableAddress)
+				.writeInt(0) // Cache type
+				.writeInt(0) // Reserved
+				.writeLong(0L) // Scratch
+				.writeLong(0L); // Scratch
+
+			return bufferBuilder.build();
+		}
 	}
 
 	public static class SuperblockV2V3 extends Superblock {
@@ -446,7 +496,7 @@ public SuperblockV2V3() {
 			sizeOfLengths = 8;
 			baseAddressByte = 0;
 			superblockExtensionAddress = Constants.UNDEFINED_ADDRESS;
-			endOfFileAddress = 500; // TODO
+			endOfFileAddress = 1000; // TODO
 			rootGroupObjectHeaderAddress = WritableHdfFile.ROOT_GROUP_ADDRESS;
 		}
diff --git a/jhdf/src/main/java/io/jhdf/Utils.java b/jhdf/src/main/java/io/jhdf/Utils.java
index f9c904c2..d07e390f 100644
--- a/jhdf/src/main/java/io/jhdf/Utils.java
+++ b/jhdf/src/main/java/io/jhdf/Utils.java
@@ -403,6 +403,17 @@ public static int[] getDimensions(Object data) {
 		return ArrayUtils.toPrimitive(dims.toArray(new Integer[0]));
 	}
 
+	public static int getDimensionCount(Object data) {
+		int dimensionCount = 0;
+		int dimLength = Array.getLength(data);
+
+		while (dimLength > 0 && Array.get(data, 0).getClass().isArray()) {
+			data = Array.get(data, 0);
+			dimensionCount++;
+		}
+		return dimensionCount;
+	}
+
 	public static Class<?> getType(Object obj) {
 		final Class<?> type;
 		if(obj.getClass().isArray()) {
@@ -456,15 +467,4 @@ private static void flattenInternal(Object data, List<Object> flat) {
 			flat.add(data);
 		}
 	}
-
-	public static int totalChunks(int[] datasetDimensions, int[] chunkDimensions) {
-		int chunks = 1;
-		for (int i = 0; i < datasetDimensions.length; i++) {
-			int chunksInDim = datasetDimensions[i] / chunkDimensions[i];
-			// If there is a partial chunk then we need to add one chunk in this dim
-			if(datasetDimensions[i] % chunkDimensions[i] != 0 ) chunksInDim++;
-			chunks *= chunksInDim;
-		}
-		return chunks;
-	}
 }
diff --git a/jhdf/src/main/java/io/jhdf/WritableDatasetImpl.java b/jhdf/src/main/java/io/jhdf/WritableDatasetImpl.java
index 8c42e821..47090cc4 100644
--- a/jhdf/src/main/java/io/jhdf/WritableDatasetImpl.java
+++ b/jhdf/src/main/java/io/jhdf/WritableDatasetImpl.java
@@ -14,33 +14,44 @@
 import io.jhdf.api.Group;
 import io.jhdf.api.NodeType;
 import io.jhdf.api.WritiableDataset;
+import io.jhdf.btree.BTreeV1;
+import io.jhdf.dataset.chunked.Chunk;
+import io.jhdf.dataset.chunked.indexing.ChunkImpl;
 import io.jhdf.exceptions.HdfWritingException;
+import io.jhdf.filter.Filter;
+import io.jhdf.filter.FilterManager;
 import io.jhdf.filter.PipelineFilterWithData;
 import io.jhdf.object.datatype.DataType;
+import io.jhdf.object.datatype.UFixed;
+import io.jhdf.object.datatype.FixedPoint;
 import io.jhdf.object.message.AttributeInfoMessage;
 import io.jhdf.object.message.AttributeMessage;
 import io.jhdf.object.message.DataLayout;
+import io.jhdf.object.message.DataLayoutMessage.ChunkedDataLayoutMessage;
 import io.jhdf.object.message.DataLayoutMessage.ContiguousDataLayoutMessage;
 import io.jhdf.object.message.DataSpace;
 import io.jhdf.object.message.DataSpaceMessage;
 import io.jhdf.object.message.DataTypeMessage;
 import io.jhdf.object.message.FillValueMessage;
+import io.jhdf.object.message.FilterPipelineMessage;
 import io.jhdf.object.message.Message;
 import io.jhdf.storage.HdfFileChannel;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.File;
-import java.lang.reflect.Array;
 import java.nio.ByteBuffer;
+// import java.lang.reflect.Array;
+// import java.nio.ByteBuffer;
 import java.nio.file.Path;
 import java.util.ArrayList;
+import java.util.BitSet;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 
 import static io.jhdf.Utils.flatten;
-import static io.jhdf.Utils.stripLeadingIndex;
+// import static io.jhdf.Utils.stripLeadingIndex;
 import static org.apache.commons.lang3.ClassUtils.primitiveToWrapper;
 
 public class WritableDatasetImpl extends AbstractWritableNode implements WritiableDataset {
@@ -48,15 +59,25 @@ public class WritableDatasetImpl extends AbstractWritableNode implements Writiab
 	private static final Logger logger = LoggerFactory.getLogger(WritableDatasetImpl.class);
 
 	private final Object data;
-	private final DataType dataType;
+	private DataType dataType;
+	private boolean chunked = false;
 	private final DataSpace dataSpace;
 
 	public WritableDatasetImpl(Object data, String name, Group parent) {
 		super(parent, name);
-		this.data = data;
-		this.dataType = DataType.fromObject(data);
-		this.dataSpace = DataSpace.fromObject(data);
+
+		if (data.getClass() == io.jhdf.object.datatype.UFixed.class) {
+			this.data = ((UFixed) data).data;
+			this.dataType = DataType.fromObject(this.data);
+			this.dataSpace = DataSpace.fromObject(this.data);
+			((FixedPoint) this.dataType).setSigned(false);
+			logger.info("Class of dataset [{}]", data.getClass());
+		} else {
+			this.data = data;
+			this.dataType = DataType.fromObject(data);
+			this.dataSpace = DataSpace.fromObject(data);
+		}
 	}
 
 	@Override
@@ -186,6 +207,11 @@ public long getAddress() {
 		throw new HdfWritingException("Address not known until written");
 	}
 
+	@Override
+	public void setChunked(boolean chunk) {
+		chunked = chunk;
+	}
+
 	@Override
 	public boolean isLink() {
 		return false;
@@ -196,18 +222,74 @@ public boolean isAttributeCreationOrderTracked() {
 		return false;
 	}
 
-	@Override
+	@Override
 	public long write(HdfFileChannel hdfFileChannel, long position) {
-		logger.info("Writing dataset [{}] at position [{}]", getPath(), position);
+		if (chunked) {
+			return writec(hdfFileChannel, position);
+		} else {
+			return writes(hdfFileChannel, position);
+		}
+	}
+
+
+	private long writec(HdfFileChannel hdfFileChannel, long position) {
+
+		logger.info("Writing chunked dataset [{}] at position [{}]", getPath(), position);
+		List<Message> messages = new ArrayList<>();
+		messages.add(DataSpaceMessage.create(this.dataSpace));
+		messages.add(DataTypeMessage.create(this.dataType));
+		messages.add(FillValueMessage.NO_FILL);
+		messages.add(FilterPipelineMessage.create());
+
+		if (!getAttributes().isEmpty()) {
+			AttributeInfoMessage attributeInfoMessage = AttributeInfoMessage.create();
+			messages.add(attributeInfoMessage);
+			for (Map.Entry<String, Object> attribute : getAttributes().entrySet()) {
+				logger.info("Writing attribute [{}]", attribute.getKey());
+				AttributeMessage attributeMessage = AttributeMessage.create(attribute.getKey(), attribute.getValue());
+				messages.add(attributeMessage);
+			}
+		}
+
+		int elemSize = this.dataType.getSize();
+		long bTreeAddress = position + 0x100;
+
+		// Data layout message
+		messages.add(ChunkedDataLayoutMessage.create(bTreeAddress, elemSize, Utils.getDimensions(data)));
+
+
+		// Write Object Header
+		ObjectHeader.ObjectHeaderV1 objectHeader = new ObjectHeader.ObjectHeaderV1(position, messages);
+		hdfFileChannel.write(objectHeader.toBuffer(), position);
+
+		// Write data to get length of chunk
+		long dataAddress = bTreeAddress + 0x1000;
+		int dataSize = (int) writeData(hdfFileChannel, dataAddress);
+
+		// Construct and write BTree
+
+		ArrayList<Chunk> chunks = new ArrayList<>(1);
+		int[] chunkOffset = new int[Utils.getDimensions(data).length];
+		BitSet filterMask = BitSet.valueOf(new byte[] { 0, 0, 0, 0 });
+		chunks.add(new ChunkImpl(dataAddress, dataSize, chunkOffset, filterMask));
+
+		BTreeV1 bTree = BTreeV1.createDataBTree(Constants.UNDEFINED_ADDRESS, Constants.UNDEFINED_ADDRESS, (short) chunks.size(), chunks);
+
+		hdfFileChannel.write(bTree.toBuffer(), bTreeAddress);
+
+		// Done
+		return dataAddress + dataSize;
+	}
+
+
+	private long writes(HdfFileChannel hdfFileChannel, long position) {
+		logger.info("Writing contiguous dataset [{}] at position [{}]", getPath(), position);
 		List<Message> messages = new ArrayList<>();
 		messages.add(DataTypeMessage.create(this.dataType));
 		messages.add(DataSpaceMessage.create(this.dataSpace));
 		messages.add(FillValueMessage.NO_FILL);
 
-		// TODO will have know fixed size so don't really need these objects but for now...
-		ContiguousDataLayoutMessage placeholder = ContiguousDataLayoutMessage.create(Constants.UNDEFINED_ADDRESS, Constants.UNDEFINED_ADDRESS);
-		messages.add(placeholder);
-
-		if(!getAttributes().isEmpty()) {
+		if (!getAttributes().isEmpty()) {
 			AttributeInfoMessage attributeInfoMessage = AttributeInfoMessage.create();
 			messages.add(attributeInfoMessage);
 			for (Map.Entry<String, Object> attribute : getAttributes().entrySet()) {
@@ -217,9 +299,17 @@ public long write(HdfFileChannel hdfFileChannel, long position) {
 			}
 		}
 
+		// TODO will have known fixed size so don't really need these objects but for now...
+		ContiguousDataLayoutMessage placeholder = ContiguousDataLayoutMessage.create(Constants.UNDEFINED_ADDRESS,
+				Constants.UNDEFINED_ADDRESS);
+		messages.add(placeholder);
+
 		ObjectHeader.ObjectHeaderV2 objectHeader = new ObjectHeader.ObjectHeaderV2(position, messages);
 		int ohSize = objectHeader.toBuffer().limit();
 
+		objectHeader = new ObjectHeader.ObjectHeaderV2(position, messages);
+
+
 		// Now know where we will write the data
 		long dataAddress = position + ohSize;
 		long dataSize = writeData(hdfFileChannel, dataAddress);
@@ -239,23 +329,31 @@ private long writeData(HdfFileChannel hdfFileChannel, long dataAddress) {
 		logger.info("Writing data for dataset [{}] at position [{}]", getPath(), dataAddress);
 		hdfFileChannel.position(dataAddress);
 
-		dataType.writeData(data, getDimensions(), hdfFileChannel);
-
-		return dataSpace.getTotalLength() * dataType.getSize();
-	}
-
-
-	private static void writeDoubleData(Object data, int[] dims, ByteBuffer buffer, HdfFileChannel hdfFileChannel) {
-		if (dims.length > 1) {
-			for (int i = 0; i < dims[0]; i++) {
-				Object newArray = Array.get(data, i);
-				writeDoubleData(newArray, stripLeadingIndex(dims), buffer, hdfFileChannel);
-			}
-		} else {
-			buffer.asDoubleBuffer().put((double[]) data);
-			hdfFileChannel.write(buffer);
-			buffer.clear();
+		if (chunked) {
+			Filter filter = FilterManager.ID_TO_FILTER.get(1);
+
+			int[] filterData = {9};
+
+			ByteBuffer buffer = ByteBuffer.wrap(filter.encode(dataType.encodeData(data).array(), filterData));
+			hdfFileChannel.write(buffer, dataAddress);
+
+			return buffer.limit();
+		} else {
+			dataType.writeData(data, getDimensions(), hdfFileChannel);
+			return dataSpace.getTotalLength() * dataType.getSize();
 		}
 	}
+
+	// private static void writeDoubleData(Object data, int[] dims, ByteBuffer buffer, HdfFileChannel hdfFileChannel) {
+	// 	if (dims.length > 1) {
+	// 		for (int i = 0; i < dims[0]; i++) {
+	// 			Object newArray = Array.get(data, i);
+	// 			writeDoubleData(newArray, stripLeadingIndex(dims), buffer, hdfFileChannel);
+	// 		}
+	// 	} else {
+	// 		buffer.asDoubleBuffer().put((double[]) data);
+	// 		hdfFileChannel.write(buffer);
+	// 		buffer.clear();
+	// 	}
+	// }
 }
diff --git a/jhdf/src/main/java/io/jhdf/WritableGroupImpl.java b/jhdf/src/main/java/io/jhdf/WritableGroupImpl.java
index d915be19..e7216dc4 100644
--- a/jhdf/src/main/java/io/jhdf/WritableGroupImpl.java
+++ b/jhdf/src/main/java/io/jhdf/WritableGroupImpl.java
@@ -110,6 +110,13 @@ public WritiableDataset putDataset(String name, Object data) {
 		return writableDataset;
 	}
 
+	@Override
+	public WritiableDataset putDataset(WritiableDataset dataset) {
+		children.put(dataset.getName(), dataset);
+		logger.info("Added dataset [{}] to group [{}]", dataset.getName(), getPath());
+		return dataset;
+	}
+
 	@Override
 	public WritableGroup putGroup(String name) {
 		if(StringUtils.isBlank(name)) {
@@ -152,7 +159,8 @@ public long write(HdfFileChannel hdfFileChannel, long position) {
 			}
 		}
 
-		ObjectHeader.ObjectHeaderV2 objectHeader = new ObjectHeader.ObjectHeaderV2(position, messages);
+		//ObjectHeader.ObjectHeaderV2 objectHeader = new ObjectHeader.ObjectHeaderV2(position, messages);
+		ObjectHeader.ObjectHeaderV1 objectHeader = new ObjectHeader.ObjectHeaderV1(position, messages);
 		ByteBuffer tempBuffer = objectHeader.toBuffer();
 		int objectHeaderSize = tempBuffer.limit();
@@ -162,6 +170,7 @@ public long write(HdfFileChannel hdfFileChannel, long position) {
 		messages = new ArrayList<>();
 		messages.add(groupInfoMessage);
 		messages.add(linkInfoMessage);
+
 
 		if(!getAttributes().isEmpty()) {
 			AttributeInfoMessage attributeInfoMessage = AttributeInfoMessage.create();
@@ -174,18 +183,23 @@ public long write(HdfFileChannel hdfFileChannel, long position) {
 		}
 
 		long nextChildAddress = position + objectHeaderSize;
+		nextChildAddress = nextChildAddress + (16 - (nextChildAddress % 16));
+
 		for (Map.Entry<String, WritableNode> child : children.entrySet()) {
 			LinkMessage linkMessage = LinkMessage.create(child.getKey(), nextChildAddress);
 			messages.add(linkMessage);
 			long endPosition = child.getValue().write(hdfFileChannel, nextChildAddress);
 			nextChildAddress = endPosition;
+			nextChildAddress = nextChildAddress + (16 - (nextChildAddress % 16));
 		}
 
-		objectHeader = new ObjectHeader.ObjectHeaderV2(position, messages);
+		//objectHeader = new ObjectHeader.ObjectHeaderV2(position, messages);
+		objectHeader = new ObjectHeader.ObjectHeaderV1(position, messages);
 		hdfFileChannel.write(objectHeader.toBuffer(), position);
 
 		logger.info("Finished writing group [{}]", getPath());
 		return nextChildAddress;
 	}
+
 }
diff --git a/jhdf/src/main/java/io/jhdf/WritableHdfFile.java b/jhdf/src/main/java/io/jhdf/WritableHdfFile.java
index ca211f58..a56f32cc 100644
--- a/jhdf/src/main/java/io/jhdf/WritableHdfFile.java
+++ b/jhdf/src/main/java/io/jhdf/WritableHdfFile.java
@@ -42,7 +42,8 @@ public class WritableHdfFile implements WritableGroup, AutoCloseable {
 
 	private final Path path;
 	private final FileChannel fileChannel;
-	private final Superblock.SuperblockV2V3 superblock;
+	//private final Superblock.SuperblockV2V3 superblock;
+	private final Superblock.SuperblockV0V1 superblock;
 	private final HdfFileChannel hdfFileChannel;
 	private final WritableGroup rootGroup;
 
@@ -55,11 +56,12 @@ public class WritableHdfFile implements WritableGroup, AutoCloseable {
 		} catch (IOException e) {
 			throw new HdfWritingException("Failed to open file: " + path.toAbsolutePath(), e);
 		}
-		this.superblock = new Superblock.SuperblockV2V3();
+		//this.superblock = new Superblock.SuperblockV2V3();
+		this.superblock = new Superblock.SuperblockV0V1();
 		this.hdfFileChannel = new HdfFileChannel(this.fileChannel, this.superblock);
 		this.rootGroup = new WritableGroupImpl(null, "/");
-		this.rootGroup.putAttribute("_jHDF", getJHdfInfo());
+		//this.rootGroup.putAttribute("_jHDF", getJHdfInfo());
 	}
 
 	@Override
@@ -75,9 +77,19 @@ public void close() {
 	private void flush() {
 		logger.info("Flushing to disk [{}]...", path.toAbsolutePath());
 		try {
-			rootGroup.write(hdfFileChannel, ROOT_GROUP_ADDRESS);
-			hdfFileChannel.write(getJHdfInfoBuffer());
-			long endOfFile = hdfFileChannel.getFileChannel().size();
+			//rootGroup.write(hdfFileChannel, ROOT_GROUP_ADDRESS); // V2 header
+			rootGroup.write(hdfFileChannel, superblock.getRootGroupSymbolTableAddress()); // V0 header
+			//hdfFileChannel.write(getJHdfInfoBuffer());
+			long eof = hdfFileChannel.getFileChannel().size();
+			long endOfFile = eof + 256;
+
+			// long endOfFile = eof + (4096-(eof % 4096));
+			// if ((eof % 4096) == 0) { // fix unnecessary padding
+			// 	endOfFile = endOfFile-4096;
+			// }
+			// hdfFileChannel.write(ByteBuffer.allocate((int)(endOfFile-eof)),eof); // pad to 4k for chunked storage
+
+			hdfFileChannel.write(ByteBuffer.allocate(256), eof);
 			hdfFileChannel.write(superblock.toBuffer(endOfFile), 0L);
 			logger.info("Flushed to disk [{}] file is [{}] bytes", path.toAbsolutePath(), endOfFile);
 		} catch (IOException e) {
@@ -99,6 +111,11 @@ public WritiableDataset putDataset(String name, Object data) {
 		return rootGroup.putDataset(name, data);
 	}
 
+	// @Override
+	public WritiableDataset putDataset(WritiableDataset dataset) {
+		return rootGroup.putDataset(dataset);
+	}
+
 	@Override
 	public WritableGroup putGroup(String name) {
 		return rootGroup.putGroup(name);
diff --git a/jhdf/src/main/java/io/jhdf/api/WritableGroup.java b/jhdf/src/main/java/io/jhdf/api/WritableGroup.java
index 574d51b9..5bbca43f 100644
--- a/jhdf/src/main/java/io/jhdf/api/WritableGroup.java
+++ b/jhdf/src/main/java/io/jhdf/api/WritableGroup.java
@@ -14,5 +14,8 @@ public interface WritableGroup extends Group, WritableNode {
 
 	WritiableDataset putDataset(String name, Object data);
 
+	WritiableDataset putDataset(WritiableDataset dataset);
+
+
 	WritableGroup putGroup(String name);
 }
diff --git a/jhdf/src/main/java/io/jhdf/api/WritiableDataset.java b/jhdf/src/main/java/io/jhdf/api/WritiableDataset.java
index dea4263f..1ed72d29 100644
--- a/jhdf/src/main/java/io/jhdf/api/WritiableDataset.java
+++ b/jhdf/src/main/java/io/jhdf/api/WritiableDataset.java
@@ -12,4 +12,6 @@
 public interface WritiableDataset extends Dataset, WritableNode {
 
+	void setChunked(boolean chunked);
+
 }
diff --git a/jhdf/src/main/java/io/jhdf/btree/BTreeV1.java b/jhdf/src/main/java/io/jhdf/btree/BTreeV1.java
index 5c9242db..66095a43 100644
--- a/jhdf/src/main/java/io/jhdf/btree/BTreeV1.java
+++ b/jhdf/src/main/java/io/jhdf/btree/BTreeV1.java
@@ -9,6 +9,9 @@
  */
 package io.jhdf.btree;
 
+import io.jhdf.dataset.chunked.Chunk;
+// import io.jhdf.BufferBuilder;
+// import io.jhdf.Constants;
 import io.jhdf.Utils;
 import io.jhdf.exceptions.HdfException;
 import io.jhdf.storage.HdfBackingStorage;
@@ -18,6 +21,7 @@
 import java.nio.ByteBuffer;
 import java.nio.charset.StandardCharsets;
 import java.util.Arrays;
+import java.util.ArrayList;
 import java.util.List;
 
 /**
@@ -34,7 +38,7 @@ public abstract class BTreeV1 {
 
 	private static final Logger logger = LoggerFactory.getLogger(BTreeV1.class);
 
-	private static final byte[] BTREE_NODE_V1_SIGNATURE = "TREE".getBytes(StandardCharsets.US_ASCII);
+	protected static final byte[] BTREE_NODE_V1_SIGNATURE = "TREE".getBytes(StandardCharsets.US_ASCII);
 	private static final int HEADER_BYTES = 6;
 
 	/**
@@ -80,6 +84,10 @@ public static BTreeV1Data createDataBTree(HdfBackingStorage hdfBackingStorage, l
 		}
 	}
 
+	public static BTreeV1Data createDataBTree(long leftSibling, long rightSibling, short entries, ArrayList<Chunk> chunks) {
+		return new BTreeV1Data.BTreeV1DataLeafNode(leftSibling, rightSibling, entries, chunks);
+	}
+
 	public static ByteBuffer readHeaderAndValidateSignature(HdfBackingStorage fc, long address) {
 		ByteBuffer header = fc.readBufferFromAddress(address, HEADER_BYTES);
 
@@ -109,6 +117,19 @@ public static ByteBuffer readHeaderAndValidateSignature(HdfBackingStorage fc, lo
 
 	}
 
+	BTreeV1(long leftSibling, long rightSibling, short entries, ArrayList<Chunk> chunks) {
+		this.address = 0;
+
+		entriesUsed = entries;
+		logger.trace("Entries = {}", entriesUsed);
+
+		leftSiblingAddress = leftSibling;
+		logger.trace("left address = {}", leftSiblingAddress);
+
+		rightSiblingAddress = rightSibling;
+		logger.trace("right address = {}", rightSiblingAddress);
+	}
+
 	public int getEntriesUsed() {
 		return entriesUsed;
 	}
@@ -127,4 +148,8 @@ public long getAddress() {
 
 	public abstract List<Long> getChildAddresses();
 
+	public ByteBuffer toBuffer() {
+		return ByteBuffer.allocate(0);
+	}
+
 }
diff --git a/jhdf/src/main/java/io/jhdf/btree/BTreeV1Data.java b/jhdf/src/main/java/io/jhdf/btree/BTreeV1Data.java
index 15d52ee9..c02e44cf 100644
--- a/jhdf/src/main/java/io/jhdf/btree/BTreeV1Data.java
+++ b/jhdf/src/main/java/io/jhdf/btree/BTreeV1Data.java
@@ -9,6 +9,8 @@
  */
 package io.jhdf.btree;
 
+import io.jhdf.BufferBuilder;
+import io.jhdf.Constants;
 import io.jhdf.Superblock;
 import io.jhdf.Utils;
 import io.jhdf.dataset.chunked.Chunk;
@@ -34,6 +36,10 @@ private BTreeV1Data(HdfBackingStorage hdfBackingStorage, long address) {
 		super(hdfBackingStorage, address);
 	}
 
+	private BTreeV1Data(long leftSibling, long rightSibling, short entries, ArrayList<Chunk> chunks) {
+		super(leftSibling, rightSibling, entries, chunks);
+	}
+
 	/**
 	 * @return the raw data chunks address from this b-tree
 	 */
@@ -63,9 +69,14 @@ private BTreeV1Data(HdfBackingStorage hdfBackingStorage, long address) {
 			bb.position(bb.position() + keySize);
 		}
 
+		BTreeV1DataLeafNode(long leftSibling, long rightSibling, short entries, ArrayList<Chunk> cList) {
+			super(leftSibling, rightSibling, entries, cList);
+			chunks = cList;
+		}
+
 		private Chunk readKeyAsChunk(Superblock sb, int dataDimensions, ByteBuffer bb) {
 			final int chunkSize = Utils.readBytesAsUnsignedInt(bb, 4);
-			final BitSet filterMask = BitSet.valueOf(new byte[]{bb.get(), bb.get(), bb.get(), bb.get()});
+			final BitSet filterMask = BitSet.valueOf(new byte[] { bb.get(), bb.get(), bb.get(), bb.get() });
 			final int[] chunkOffset = new int[dataDimensions];
 			for (int j = 0; j < dataDimensions; j++) {
 				chunkOffset[j] = Utils.readBytesAsUnsignedInt(bb, 8);
@@ -88,6 +99,40 @@ public List<Long> getChildAddresses() {
 		public List<Chunk> getChunks() {
 			return chunks;
 		}
+
+		@Override
+		public ByteBuffer toBuffer() {
+			long[] start_indices = {0L, 0L, 0L};
+			long[] end_indices = {2L, 2L, 2L};
+			long dataAddress = chunks.get(0).getAddress();
+			int dataSize = chunks.get(0).getSize();
+			return new BufferBuilder()
+				.writeBytes(BTREE_NODE_V1_SIGNATURE) // Signature
+				.writeByte(1) // Type
+				.writeByte(0) // Level
+				.writeShort(1) // Entries
+				.writeLong(Constants.UNDEFINED_ADDRESS)
+				.writeLong(Constants.UNDEFINED_ADDRESS)
+				.writeInt(dataSize) // size of data
+				.writeInt(0) // mask
+				.writeLongs(start_indices) // array start
+				.writeLong(0L) // element start
+				.writeLong(dataAddress)
+				.writeInt(0) // size 0 for last key
+				.writeInt(0) // mask
+				.writeLongs(end_indices) // array size
+				.writeLong(2L) // element size
+				.writeLong(0L) // fill space (no data pointer)
+				.writeLongs(start_indices) // array start
+				.writeLongs(start_indices) // array start
+				.writeLongs(start_indices) // array start
+				.writeLongs(start_indices) // array start
+				.writeLongs(start_indices) // array start
+				.writeLongs(start_indices) // array start
+				.writeLongs(start_indices) // array start
+				.writeLongs(start_indices) // array start
+				.build();
+		}
 	}
 
 	/* package */ static class BTreeV1DataNonLeafNode extends BTreeV1Data {
@@ -103,13 +148,15 @@ public List<Chunk> getChunks() {
 			final int keysAndPointersBytes = keyBytes + childPointerBytes;
 
 			final long keysAddress = address + 8L + 2L * hdfBackingStorage.getSizeOfOffsets();
-			final ByteBuffer keysAndPointersBuffer = hdfBackingStorage.readBufferFromAddress(keysAddress, keysAndPointersBytes);
+			final ByteBuffer keysAndPointersBuffer = hdfBackingStorage.readBufferFromAddress(keysAddress,
+					keysAndPointersBytes);
 
 			childNodes = new ArrayList<>(entriesUsed);
 			for (int i = 0; i < entriesUsed; i++) {
 				keysAndPointersBuffer.position(keysAndPointersBuffer.position() + keySize);
-				long childAddress = Utils.readBytesAsUnsignedLong(keysAndPointersBuffer, hdfBackingStorage.getSizeOfOffsets());
+				long childAddress = Utils.readBytesAsUnsignedLong(keysAndPointersBuffer,
+						hdfBackingStorage.getSizeOfOffsets());
 				childNodes.add(BTreeV1.createDataBTree(hdfBackingStorage, childAddress, dataDimensions));
 			}
 		}
@@ -131,5 +178,6 @@ public List<Long> getChildAddresses() {
 			}
 			return childAddresses;
 		}
+
 	}
 }
diff --git a/jhdf/src/main/java/io/jhdf/dataset/chunked/ChunkedDatasetV4.java b/jhdf/src/main/java/io/jhdf/dataset/chunked/ChunkedDatasetV4.java
index 5ec36330..0320af76 100644
--- a/jhdf/src/main/java/io/jhdf/dataset/chunked/ChunkedDatasetV4.java
+++ b/jhdf/src/main/java/io/jhdf/dataset/chunked/ChunkedDatasetV4.java
@@ -17,9 +17,9 @@
 import io.jhdf.dataset.chunked.indexing.EmptyChunkIndex;
 import io.jhdf.dataset.chunked.indexing.ExtensibleArrayIndex;
 import io.jhdf.dataset.chunked.indexing.FixedArrayIndex;
-import io.jhdf.dataset.chunked.indexing.ImplicitChunkIndex;
 import io.jhdf.dataset.chunked.indexing.SingleChunkIndex;
 import io.jhdf.exceptions.HdfException;
+import io.jhdf.exceptions.UnsupportedHdfException;
 import io.jhdf.object.message.DataLayoutMessage.ChunkedDataLayoutMessageV4;
 import io.jhdf.storage.HdfBackingStorage;
 import org.apache.commons.lang3.ArrayUtils;
@@ -85,9 +85,7 @@ protected Map<ChunkOffset, Chunk> initialize() {
 				chunkIndex = new SingleChunkIndex(layoutMessage, datasetInfo);
 				break;
 			case 2: // Implicit
-				logger.debug("Reading implicit indexed dataset");
-				chunkIndex = new ImplicitChunkIndex(layoutMessage.getAddress(), datasetInfo);
-				break;
+				throw new UnsupportedHdfException("Implicit indexing is currently not supported");
 			case 3: // Fixed array
 				logger.debug("Reading fixed array indexed dataset");
 				chunkIndex = new FixedArrayIndex(hdfBackingStorage, layoutMessage.getAddress(), datasetInfo);
diff --git a/jhdf/src/main/java/io/jhdf/filter/DeflatePipelineFilter.java b/jhdf/src/main/java/io/jhdf/filter/DeflatePipelineFilter.java
index 292c12fa..557d8281 100644
--- a/jhdf/src/main/java/io/jhdf/filter/DeflatePipelineFilter.java
+++ b/jhdf/src/main/java/io/jhdf/filter/DeflatePipelineFilter.java
@@ -16,6 +16,8 @@
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.util.zip.DataFormatException;
+import java.util.zip.Deflater;
+import java.util.zip.DeflaterOutputStream;
 import java.util.zip.Inflater;
 
 public class DeflatePipelineFilter implements Filter {
@@ -68,4 +70,30 @@ public byte[] decode(byte[] compressedData, int[] filterData) {
 			inflater.end();
 		}
 	}
+
+	public byte[] encode(byte[] data, int[] filterData) {
+
+		try (final ByteArrayOutputStream baos = new ByteArrayOutputStream(data.length)) {
+			// Setup the deflater
+			DeflaterOutputStream daos = new DeflaterOutputStream(baos, new Deflater(9));
+
+			// Do the compression
+			daos.write(data);
+			daos.close();
+
+			byte[] output = baos.toByteArray();
+
+			if (logger.isDebugEnabled()) {
+				logger.debug("Compressed chunk. Uncompressed size = {} bytes, Compressed size = {} bytes",
+					data.length, output.length);
+			}
+
+			byte[] test = decode(output, filterData); // round-trip sanity check of the compressed output
+
+			return output;
+
+		} catch (IOException e) {
+			throw new HdfFilterException("Deflating failed", e);
+		}
+	}
 }
diff --git a/jhdf/src/main/java/io/jhdf/filter/Filter.java b/jhdf/src/main/java/io/jhdf/filter/Filter.java
index ebd76967..03a1db6b 100644
--- a/jhdf/src/main/java/io/jhdf/filter/Filter.java
+++ b/jhdf/src/main/java/io/jhdf/filter/Filter.java
@@ -45,4 +45,6 @@ public interface Filter {
 	 */
 	byte[] decode(byte[] encodedData, int[] filterData);
 
+	byte[] encode(byte[] data, int[] filterData);
+
 }
diff --git a/jhdf/src/main/java/io/jhdf/filter/FilterManager.java b/jhdf/src/main/java/io/jhdf/filter/FilterManager.java
index bbb932bb..d1a1bf76 100644
--- a/jhdf/src/main/java/io/jhdf/filter/FilterManager.java
+++ b/jhdf/src/main/java/io/jhdf/filter/FilterManager.java
@@ -31,18 +31,18 @@ public enum FilterManager {
 
 	private static final Logger logger = LoggerFactory.getLogger(FilterManager.class);
 
-	private static final Map<Integer, Filter> ID_TO_FILTER = new HashMap<>();
+	public static final Map<Integer, Filter> ID_TO_FILTER = new HashMap<>();
 
 	static {
 		logger.info("Initializing HDF5 filters...");
 
 		// Load the built in filters
 		addFilter(new DeflatePipelineFilter());
-		addFilter(new ByteShuffleFilter());
-		addFilter(new FletcherChecksumFilter());
-		addFilter(new LzfFilter());
-		addFilter(new BitShuffleFilter());
-		addFilter(new Lz4Filter());
+		// addFilter(new ByteShuffleFilter());
+		// addFilter(new FletcherChecksumFilter());
+		// addFilter(new LzfFilter());
+		// addFilter(new BitShuffleFilter());
+		// addFilter(new Lz4Filter());
 
 		// Add dynamically loaded filters
 		ServiceLoader<Filter> serviceLoader = ServiceLoader.load(Filter.class);
diff --git a/jhdf/src/main/java/io/jhdf/object/datatype/DataType.java b/jhdf/src/main/java/io/jhdf/object/datatype/DataType.java
index 115b0762..0ef2b5c4 100644
--- a/jhdf/src/main/java/io/jhdf/object/datatype/DataType.java
+++ b/jhdf/src/main/java/io/jhdf/object/datatype/DataType.java
@@ -95,7 +95,7 @@ protected DataType(ByteBuffer bb) {
 	}
 
 	public static DataType fromObject(Object data) {
-		final Class<?> type = Utils.getType(data);
+		Class<?> type = Utils.getType(data);
 
 		if (type == byte.class || type == Byte.class) {
 			return new FixedPoint(1);
diff --git a/jhdf/src/main/java/io/jhdf/object/datatype/FixedPoint.java b/jhdf/src/main/java/io/jhdf/object/datatype/FixedPoint.java
index 6abd1b31..189f6421 100644
--- a/jhdf/src/main/java/io/jhdf/object/datatype/FixedPoint.java
+++ b/jhdf/src/main/java/io/jhdf/object/datatype/FixedPoint.java
@@ -38,7 +38,7 @@ public class FixedPoint extends DataType implements OrderedDataType, WritableDat
 	private final ByteOrder order;
 	private final boolean lowPadding;
 	private final boolean highPadding;
-	private final boolean signed;
+	private boolean signed;
 	private final short bitOffset;
 	private final short bitPrecision;
 
@@ -87,6 +87,10 @@ public boolean isSigned() {
 		return signed;
 	}
 
+	public void setSigned(boolean sig) {
+		signed = sig;
+	}
+
 	public short getBitOffset() {
 		return bitOffset;
 	}
@@ -359,12 +363,15 @@ public ByteBuffer toBuffer() {
 
 	@Override
 	public void writeData(Object data, int[] dimensions, HdfFileChannel hdfFileChannel) {
+		if (data.getClass() == io.jhdf.object.datatype.UFixed.class) {
+			data = ((UFixed) data).data;
+		}
+
 		if (data.getClass().isArray()) {
 			writeArrayData(data, dimensions, hdfFileChannel);
 		} else {
 			writeScalarData(data, hdfFileChannel);
 		}
-
 	}
 
 	private void writeScalarData(Object data, HdfFileChannel hdfFileChannel) {
diff --git a/jhdf/src/main/java/io/jhdf/object/datatype/UFixed.java b/jhdf/src/main/java/io/jhdf/object/datatype/UFixed.java
new file mode 100644
index 00000000..62d08ad7
--- /dev/null
+++ b/jhdf/src/main/java/io/jhdf/object/datatype/UFixed.java
@@ -0,0 +1,18 @@
+/*
+ * This file is part of jHDF. A pure Java library for accessing HDF5 files.
+ *
+ * https://jhdf.io
+ *
+ * Copyright (c) 2024 James Mudd
+ *
+ * MIT License see 'LICENSE' file
+ */
+package io.jhdf.object.datatype;
+
+public class UFixed {
+	public Object data;
+
+	public UFixed(Object dat) {
+		data = dat;
+	}
+}
diff --git a/jhdf/src/main/java/io/jhdf/object/message/DataLayoutMessage.java b/jhdf/src/main/java/io/jhdf/object/message/DataLayoutMessage.java
index 4f2d9ee8..e0ab7be7 100644
--- a/jhdf/src/main/java/io/jhdf/object/message/DataLayoutMessage.java
+++ b/jhdf/src/main/java/io/jhdf/object/message/DataLayoutMessage.java
@@ -41,7 +41,8 @@ public static DataLayoutMessage createDataLayoutMessage(ByteBuffer bb, Superbloc
 			case 4:
 				return readV3V4Message(bb, sb, flags, version);
 			default:
-				throw new UnsupportedHdfException("Unsupported data layout message version detected. Detected version = " + version);
+				throw new UnsupportedHdfException(
+						"Unsupported data layout message version detected. Detected version = " + version);
 		}
 	}
 
@@ -175,11 +176,11 @@ public long getSize() {
 		@Override
 		public ByteBuffer toBuffer() {
 			return new BufferBuilder()
-				.writeByte(3) // Version
-				.writeByte(1) // Contiguous Storage
-				.writeLong(address)
-				.writeLong(size)
-				.build();
+					.writeByte(3) // Version
+					.writeByte(1) // Contiguous Storage
+					.writeLong(address)
+					.writeLong(size)
+					.build();
 		}
 	}
 
@@ -207,6 +208,10 @@ private ChunkedDataLayoutMessage(ByteBuffer bb, Superblock sb, BitSet flags) {
 			size = Utils.readBytesAsUnsignedInt(bb, 4);
 		}
 
+		public static ChunkedDataLayoutMessage create(long bTreeAddress, int size, int[] chunkDimensions) {
+			return new ChunkedDataLayoutMessage(Message.BASIC_FLAGS, bTreeAddress, size, chunkDimensions);
+		}
+
 		@Override
 		public DataLayout getDataLayout() {
 			return DataLayout.CHUNKED;
@@ -226,7 +231,16 @@ public int[] getChunkDimensions() {
 
 		@Override
 		public ByteBuffer toBuffer() {
-			return null;
+			return new BufferBuilder()
+				.writeByte(3) // Version
+				.writeByte(2) // Chunked Storage
+				.writeByte(chunkDimensions.length + 1)
+				.writeLong(bTreeAddress)
+				.writeInts(chunkDimensions)
+				.writeInt(size)
+				.writeInt(0) // padding
+//				.writeByte(0) // padding
+				.build();
 		}
 	}
 
@@ -256,7 +270,7 @@ public static class ChunkedDataLayoutMessageV4 extends DataLayoutMessage {
 
 		private ChunkedDataLayoutMessageV4(ByteBuffer bb, Superblock sb, BitSet flags) {
 			super(flags);
-			final BitSet chunkedFlags = BitSet.valueOf(new byte[]{bb.get()});
+			final BitSet chunkedFlags = BitSet.valueOf(new byte[] { bb.get() });
 
 			final int chunkDimensionality = bb.get();
 			final int dimSizeBytes = bb.get();
@@ -272,7 +286,8 @@ private ChunkedDataLayoutMessageV4(ByteBuffer bb, Superblock sb, BitSet flags) {
 			if (chunkedFlags.get(SINGLE_INDEX_WITH_FILTER)) {
 				isFilteredSingleChunk = true;
 				sizeOfFilteredSingleChunk = Utils.readBytesAsUnsignedInt(bb, sb.getSizeOfLengths());
-				filterMaskFilteredSingleChunk = BitSet.valueOf(new byte[]{bb.get(), bb.get(), bb.get(), bb.get()});
+				filterMaskFilteredSingleChunk = BitSet
+						.valueOf(new byte[] { bb.get(), bb.get(), bb.get(), bb.get() });
 			}
 			break;
@@ -382,5 +397,4 @@ public int getMessageType() {
 		return MESSAGE_TYPE;
 	}
 
-
 }
diff --git a/jhdf/src/main/java/io/jhdf/object/message/DataSpace.java b/jhdf/src/main/java/io/jhdf/object/message/DataSpace.java
index f22ca298..f170840a 100644
--- a/jhdf/src/main/java/io/jhdf/object/message/DataSpace.java
+++ b/jhdf/src/main/java/io/jhdf/object/message/DataSpace.java
@@ -86,12 +86,18 @@ public static DataSpace readDataSpace(ByteBuffer bb, Superblock sb) {
 	public static DataSpace fromObject(Object data) {
 		if(data.getClass().isArray()) {
 			int[] dimensions1 = Utils.getDimensions(data);
-			return new DataSpace((byte) 2,
-				false,
-				dimensions1,
-				Arrays.stream(dimensions1).asLongStream().toArray(),
-				(byte) 1 // Simple
-			);
+			// return new DataSpace((byte) 2,
+			// 	false,
+			// 	dimensions1,
+			// 	Arrays.stream(dimensions1).asLongStream().toArray(),
+			// 	(byte) 1 // Simple
+			// );
+			return new DataSpace((byte) 1,
+				false,
+				dimensions1,
+				Arrays.stream(dimensions1).asLongStream().toArray(),
+				(byte) 1 // Simple
+			);
 		} else {
 			// Scalar
 			return new DataSpace((byte) 2,
@@ -141,19 +147,40 @@ public boolean isMaxSizesPresent() {
 	}
 
 	public ByteBuffer toBuffer() {
-		BitSet flags = new BitSet(8);
-		flags.set(MAX_SIZES_PRESENT_BIT, maxSizesPresent);
-		BufferBuilder bufferBuilder = new BufferBuilder()
-			.writeByte(version) // Version
-			.writeByte(dimensions.length) // no dims
-			.writeBitSet(flags, 1)
-			.writeByte(type);
-
-		for (int dimension : dimensions) {
-			// TODO should be size of length
-			bufferBuilder.writeLong(dimension);
-		}
-
-		return bufferBuilder.build();
+		if (version == 1) {
+			BitSet flags = new BitSet(8);
+			flags.set(MAX_SIZES_PRESENT_BIT, maxSizesPresent);
+			BufferBuilder bufferBuilder = new BufferBuilder()
+				.writeByte(version) // Version
+				.writeByte(dimensions.length) // number of dims
+				.writeBitSet(flags, 1)
+				.writeByte(0) // reserved
+				.writeInt(0); // reserved
+
+			for (int dimension : dimensions) {
+				bufferBuilder.writeLong(dimension);
+			}
+			if (maxSizesPresent) {
+				bufferBuilder.writeLongs(maxSizes);
+			}
+			return bufferBuilder.build();
+		} else { // version == 2
+			BitSet flags = new BitSet(8);
+			flags.set(MAX_SIZES_PRESENT_BIT, maxSizesPresent);
+			BufferBuilder bufferBuilder = new BufferBuilder()
+				.writeByte(version) // Version
+				.writeByte(dimensions.length) // no dims
+				.writeBitSet(flags, 1)
+				.writeByte(type);
+
+			for (int dimension : dimensions) {
+				// TODO should be size of length
+				bufferBuilder.writeLong(dimension);
+			}
+			return bufferBuilder.build();
+		}
 	}
 }
diff --git a/jhdf/src/main/java/io/jhdf/object/message/DataSpaceMessage.java b/jhdf/src/main/java/io/jhdf/object/message/DataSpaceMessage.java
index 1b8319e5..15bf984b 100644
--- a/jhdf/src/main/java/io/jhdf/object/message/DataSpaceMessage.java
+++ b/jhdf/src/main/java/io/jhdf/object/message/DataSpaceMessage.java
@@ -3,7 +3,7 @@
  *
  * https://jhdf.io
  *
- * Copyright (c) 2025 James Mudd
+ * Copyright (c) 2024 James Mudd
  *
  * MIT License see 'LICENSE' file
  */
diff --git a/jhdf/src/main/java/io/jhdf/object/message/DataTypeMessage.java b/jhdf/src/main/java/io/jhdf/object/message/DataTypeMessage.java
index c6f4abfd..42ae3ab7 100644
--- a/jhdf/src/main/java/io/jhdf/object/message/DataTypeMessage.java
+++ b/jhdf/src/main/java/io/jhdf/object/message/DataTypeMessage.java
@@ -3,7 +3,7 @@
  *
  * https://jhdf.io
  *
- * Copyright (c) 2025 James Mudd
+ * Copyright (c) 2024 James Mudd
 *
 * MIT License see 'LICENSE' file
 */
diff --git a/jhdf/src/main/java/io/jhdf/object/message/FilterPipelineMessage.java b/jhdf/src/main/java/io/jhdf/object/message/FilterPipelineMessage.java
index 396c85fb..0c7d82f5 100644
--- a/jhdf/src/main/java/io/jhdf/object/message/FilterPipelineMessage.java
+++ b/jhdf/src/main/java/io/jhdf/object/message/FilterPipelineMessage.java
@@ -3,14 +3,20 @@
 *
 * https://jhdf.io
 *
- * Copyright (c) 2025 James Mudd
+ * Copyright (c) 2024 James Mudd
 *
 * MIT License see 'LICENSE' file
 */
 package io.jhdf.object.message;
 
+import io.jhdf.BufferBuilder;
 import io.jhdf.Utils;
 import io.jhdf.exceptions.UnsupportedHdfException;
+import io.jhdf.filter.FilterPipeline;
+import io.jhdf.object.datatype.DataType;
+import io.jhdf.filter.FilterManager;
+import io.jhdf.filter.Filter;
+
 import org.apache.commons.lang3.ArrayUtils;
 
 import java.nio.ByteBuffer;
@@ -98,6 +104,17 @@ public FilterPipelineMessage(ByteBuffer bb, BitSet messageFlags) {
 
 	}
 
+	private FilterPipelineMessage() {
+		boolean optional = false;
+		int[] data = new int[1];
+		filters = new ArrayList<>(1);
+		filters.add(new FilterInfo(1, "deflate", optional, data));
+	}
+
+	public static FilterPipelineMessage create() {
+		return new FilterPipelineMessage();
+	}
+
 	public List<FilterInfo> getFilters() {
 		return filters;
 	}
@@ -145,5 +162,22 @@ public int getMessageType() {
 		return MESSAGE_TYPE;
 	}
 
+	@Override
+	public ByteBuffer toBuffer() {
+		return new BufferBuilder()
+			.writeByte(1) // Version
+			.writeByte(1) // number of filters
+			.writeShort(0) // padding
+			.writeInt(0) // padding
+			.writeShort(1) // filter ID
+			.writeShort(8) // length of name
+			.writeShort(1) // flags
+			.writeShort(1) // number of client data
+			.writeBytes("deflate".getBytes())
+			.writeByte(0) // zero terminate string
+			.writeInt(9)
+			.build();
+	}
+
 }
diff --git a/jhdf/src/main/java/io/jhdf/object/message/GroupInfoMessage.java b/jhdf/src/main/java/io/jhdf/object/message/GroupInfoMessage.java
index eba206a6..0a4e419c 100644
--- a/jhdf/src/main/java/io/jhdf/object/message/GroupInfoMessage.java
+++ b/jhdf/src/main/java/io/jhdf/object/message/GroupInfoMessage.java
@@ -95,7 +95,8 @@ public int getMessageType() {
 	public ByteBuffer toBuffer() {
 		BufferBuilder bufferBuilder = new BufferBuilder();
 		bufferBuilder.writeByte(version);
-		bufferBuilder.writeBitSet(flags, 1);
+		//bufferBuilder.writeBitSet(flags, 1);
+		bufferBuilder.writeByte(1);
 		if (flags.get(LINK_PHASE_CHANGE_PRESENT)) {
 			bufferBuilder.writeShort(maximumCompactLinks);
 			bufferBuilder.writeShort(minimumDenseLinks);
diff --git a/jhdf/src/main/java/io/jhdf/object/message/LinkInfoMessage.java b/jhdf/src/main/java/io/jhdf/object/message/LinkInfoMessage.java
index d166d4ed..6cf0a0e7 100644
--- a/jhdf/src/main/java/io/jhdf/object/message/LinkInfoMessage.java
+++ b/jhdf/src/main/java/io/jhdf/object/message/LinkInfoMessage.java
@@ -102,8 +102,8 @@ public ByteBuffer toBuffer() {
 		return bufferBuilder.build();
 	}
 
-	private LinkInfoMessage() {
-		super(new BitSet(1));
+	private LinkInfoMessage(BitSet messageFlags) {
+		super(messageFlags);
 		this.flags = new BitSet(1);
 		this.version = 0;
 		this.maximumCreationIndex = Constants.UNDEFINED_ADDRESS;
@@ -113,6 +113,8 @@ private LinkInfoMessage() {
 	}
 
 	public static LinkInfoMessage createBasic() {
-		return new LinkInfoMessage();
+		BitSet messageFlags = new BitSet(1);
+		messageFlags.set(1);
+		return new LinkInfoMessage(messageFlags);
 	}
 }
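
Reviewer note: for anyone trying this patch locally, here is a minimal, hypothetical usage sketch of the new write path. It assumes jHDF's existing HdfFile.write(Path) entry point for creating a WritableHdfFile; the file name, dataset name and data values are made up, while UFixed and setChunked are the additions from this patch (wrapping data in UFixed marks it as unsigned fixed-point, and setChunked(true) routes the dataset through the new chunked + deflate writer instead of the contiguous one).

import io.jhdf.HdfFile;
import io.jhdf.WritableHdfFile;
import io.jhdf.api.WritiableDataset;
import io.jhdf.object.datatype.UFixed;

import java.nio.file.Paths;

public class ChunkedWriteExample {
	public static void main(String[] args) {
		int[][] data = { {1, 2, 3}, {4, 5, 6} }; // example values only

		try (WritableHdfFile hdfFile = HdfFile.write(Paths.get("chunked-example.hdf5"))) {
			// Wrapping in UFixed makes WritableDatasetImpl flip the FixedPoint type to unsigned
			WritiableDataset dataset = hdfFile.putDataset("counts", new UFixed(data));
			// Route this dataset through writec(): V1 object header, B-tree chunk index, deflate level 9
			dataset.setChunked(true);
		} // close() flushes the root group, dataset headers, B-tree and superblock
	}
}

Note that the sketch writes the whole array as a single chunk, which matches what writec() currently does (one ChunkImpl covering the dataset); per-dataset chunk-shape control would be a follow-up change.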