diff --git a/mnemonic-hadoop/mnemonic-hadoop-mapreduce/src/main/java/org/apache/mnemonic/hadoop/mapred/MneInputFormat.java b/mnemonic-hadoop/mnemonic-hadoop-mapreduce/src/main/java/org/apache/mnemonic/hadoop/mapred/MneInputFormat.java
index 0432cc75..0d48ab32 100644
--- a/mnemonic-hadoop/mnemonic-hadoop-mapreduce/src/main/java/org/apache/mnemonic/hadoop/mapred/MneInputFormat.java
+++ b/mnemonic-hadoop/mnemonic-hadoop-mapreduce/src/main/java/org/apache/mnemonic/hadoop/mapred/MneInputFormat.java
@@ -31,16 +31,31 @@
 /**
  * A Mnemonic input format that satisfies the org.apache.hadoop.mapred API.
+ *
+ * @param <MV> The type parameter representing the mnemonic durable input value.
+ * @param <V> The type parameter representing the value.
  */
 public class MneInputFormat<MV extends MneDurableInputValue<V>, V>
        extends FileInputFormat<NullWritable, MV> {
 
+  /**
+   * Creates a RecordReader for the given InputSplit.
+   *
+   * @param inputSplit The input split containing the information about the file to read.
+   * @param jobConf The job configuration containing the job settings.
+   * @param reporter The reporter to report progress, status updates, etc.
+   * @return A RecordReader to read the records from the input split.
+   * @throws IOException If an error occurs while creating the RecordReader.
+   */
   @Override
-  public RecordReader<NullWritable, MV>
-      getRecordReader(InputSplit inputSpilt,
-          JobConf jobConf,
-          Reporter reporter) throws IOException {
-    MneMapredRecordReader<MV, V> reader =
-        new MneMapredRecordReader<MV, V>((FileSplit) inputSpilt, jobConf);
+  public RecordReader<NullWritable, MV> getRecordReader(InputSplit inputSplit, JobConf jobConf, Reporter reporter)
+      throws IOException {
+    // Cast the input split to a FileSplit to work with file-based input splits
+    FileSplit fileSplit = (FileSplit) inputSplit;
+
+    // Create a new instance of MneMapredRecordReader with the given file split and job configuration
+    MneMapredRecordReader<MV, V> reader = new MneMapredRecordReader<MV, V>(fileSplit, jobConf);
+
+    // Return the created RecordReader
     return reader;
   }
 }
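
For context, here is a minimal sketch of how the reworked getRecordReader might be driven from classic mapred code. The helper class MneInputFormatSketch, the generic parameter V, and the assumption that the JobConf already carries the usual Mnemonic settings (memory service, durable types, entity factory proxies) are illustrative only and not part of this change.

```java
import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.mnemonic.hadoop.MneDurableInputValue;
import org.apache.mnemonic.hadoop.mapred.MneInputFormat;

public class MneInputFormatSketch {

  /**
   * Reads every record of a single Mnemonic file split.
   * The JobConf is assumed to already hold the Mnemonic configuration
   * (memory service, durable types, entity factory proxies); that setup is omitted here.
   */
  public static <V> void readSplit(Path file, long length, JobConf jobConf) throws IOException {
    MneInputFormat<MneDurableInputValue<V>, V> inputFormat = new MneInputFormat<>();

    // Build a file-based split covering the whole file; no preferred hosts.
    FileSplit split = new FileSplit(file, 0L, length, (String[]) null);

    // This is the method touched by the diff above.
    RecordReader<NullWritable, MneDurableInputValue<V>> reader =
        inputFormat.getRecordReader(split, jobConf, Reporter.NULL);
    try {
      NullWritable key = reader.createKey();
      MneDurableInputValue<V> value = reader.createValue();
      while (reader.next(key, value)) {
        // Consume the durable value here, e.g. hand it to application code.
      }
    } finally {
      reader.close();
    }
  }
}
```

The sketch only exercises the public mapred API (FileSplit, RecordReader, Reporter.NULL); in a real job the framework would construct the splits and call getRecordReader itself once MneInputFormat is set as the job's input format.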