diff --git a/doc/config.toml b/doc/config.toml
index 8f5c40db097..f53f8e412f5 100644
--- a/doc/config.toml
+++ b/doc/config.toml
@@ -259,6 +259,10 @@ url = "http://www.apache.org/security/"
version = "1.12.0"
url = "https://avro.apache.org/docs/1.12.0/"
+[[params.versions]]
+version = "1.11.4"
+url = "https://avro.apache.org/docs/1.11.4/"
+
[[params.versions]]
version = "1.11.3"
url = "https://avro.apache.org/docs/1.11.3/"
diff --git a/doc/content/en/avro.rdf b/doc/content/en/avro.rdf
index 0c3b6adcd4b..ba44ba09860 100644
--- a/doc/content/en/avro.rdf
+++ b/doc/content/en/avro.rdf
@@ -52,9 +52,9 @@
-->
- Avro 1.11.3
- 2023-09-17
- 1.11.3
+ Avro 1.12.0
+ 2024-08-05
+ 1.12.0
diff --git a/doc/content/en/blog/releases/avro-1.11.4-released.md b/doc/content/en/blog/releases/avro-1.11.4-released.md
new file mode 100755
index 00000000000..c5f36146dbd
--- /dev/null
+++ b/doc/content/en/blog/releases/avro-1.11.4-released.md
@@ -0,0 +1,49 @@
+---
+title: "Avro 1.11.4"
+linkTitle: "Avro 1.11.4"
+date: 2024-09-22
+---
+
+
+
+The Apache Avro community is pleased to announce the release of Avro 1.11.4!
+
+All signed release artifacts, signatures and verification instructions can
+be found [here]({{< relref "/project/download" >}}).
+
+This release [addresses 4 Jira issues](https://issues.apache.org/jira/issues/?jql=project%3DAVRO%20AND%20fixVersion%3D1.11.4)
+only in the Java SDK. All other SDKs are unchanged from their 1.12.0 releases, so please use version 1.12.0 for them!
+
+## Highlights
+
+Java
+- [AVRO-3985](https://issues.apache.org/jira/browse/AVRO-3985): Restrict trusted packages in ReflectData and SpecificData
+- [AVRO-3989](https://issues.apache.org/jira/browse/AVRO-3989): Maven Plugin Always Recompiles IDL Files
+- [AVRO-3880](https://issues.apache.org/jira/browse/AVRO-3880): Upgrade maven-antrun-plugin to 3.1.0
+- [AVRO-3748](https://issues.apache.org/jira/browse/AVRO-3748): issue with DataFileSeekableInput.SeekableInputStream.skip
+
+
+## Language SDK / Convenience artifacts
+
+* Java: https://repo1.maven.org/maven2/org/apache/avro/avro/1.11.4/
+
+Thanks to everyone for contributing!
diff --git a/doc/content/en/docs/1.11.4/Getting started (Java)/_index.md b/doc/content/en/docs/1.11.4/Getting started (Java)/_index.md
new file mode 100644
index 00000000000..a470a0291c8
--- /dev/null
+++ b/doc/content/en/docs/1.11.4/Getting started (Java)/_index.md
@@ -0,0 +1,289 @@
+---
+categories: []
+tags: ["java"]
+title: "Getting Started (Java)"
+linkTitle: "Getting Started (Java)"
+weight: 2
+---
+
+
+
+This is a short guide for getting started with Apache Avro™ using Java. This guide only covers using Avro for data serialization; see Patrick Hunt's [Avro RPC Quick Start](https://github.com/phunt/avro-rpc-quickstart) for a good introduction to using Avro for RPC.
+
+## Download
+
+Avro implementations for C, C++, C#, Java, PHP, Python, and Ruby can be downloaded from the [Apache Avro™ Download]({{< relref "/project/download" >}}) page. This guide uses Avro 1.11.4, the latest version at the time of writing. For the examples in this guide, download avro-1.11.4.jar and avro-tools-1.11.4.jar.
+
+Alternatively, if you are using Maven, add the following dependency to your POM:
+
+```xml
+<dependency>
+  <groupId>org.apache.avro</groupId>
+  <artifactId>avro</artifactId>
+  <version>1.11.4</version>
+</dependency>
+```
+
+As well as the Avro Maven plugin (for performing code generation):
+
+```xml
+<plugin>
+  <groupId>org.apache.avro</groupId>
+  <artifactId>avro-maven-plugin</artifactId>
+  <version>1.11.4</version>
+  <executions>
+    <execution>
+      <phase>generate-sources</phase>
+      <goals>
+        <goal>schema</goal>
+      </goals>
+      <configuration>
+        <sourceDirectory>${project.basedir}/src/main/avro/</sourceDirectory>
+        <outputDirectory>${project.basedir}/src/main/java/</outputDirectory>
+      </configuration>
+    </execution>
+  </executions>
+</plugin>
+<plugin>
+  <groupId>org.apache.maven.plugins</groupId>
+  <artifactId>maven-compiler-plugin</artifactId>
+  <configuration>
+    <source>1.8</source>
+    <target>1.8</target>
+  </configuration>
+</plugin>
+```
+
+You may also build the required Avro jars from source. Building Avro is beyond the scope of this guide; see the Build Documentation page in the wiki for more information.
+
+## Defining a schema
+
+Avro schemas are defined using JSON. Schemas are composed of primitive types (null, boolean, int, long, float, double, bytes, and string) and complex types (record, enum, array, map, union, and fixed). You can learn more about Avro schemas and types from the specification, but for now let's start with a simple schema example, user.avsc:
+
+```json
+{"namespace": "example.avro",
+ "type": "record",
+ "name": "User",
+ "fields": [
+ {"name": "name", "type": "string"},
+ {"name": "favorite_number", "type": ["int", "null"]},
+ {"name": "favorite_color", "type": ["string", "null"]}
+ ]
+}
+```
+
+This schema defines a record representing a hypothetical user. (Note that a schema file can only contain a single schema definition.) At minimum, a record definition must include its type ("type": "record"), a name ("name": "User"), and fields, in this case name, favorite_number, and favorite_color. We also define a namespace ("namespace": "example.avro"), which together with the name attribute defines the "full name" of the schema (example.avro.User in this case).
+
+Fields are defined via an array of objects, each of which defines a name and type (other attributes are optional, see the record specification for more details). The type attribute of a field is another schema object, which can be either a primitive or complex type. For example, the name field of our User schema is the primitive type string, whereas the favorite_number and favorite_color fields are both unions, represented by JSON arrays. unions are a complex type that can be any of the types listed in the array; e.g., favorite_number can either be an int or null, essentially making it an optional field.
+
+## Serializing and deserializing with code generation
+
+### Compiling the schema
+Code generation allows us to automatically create classes based on our previously-defined schema. Once we have defined the relevant classes, there is no need to use the schema directly in our programs. We use the avro-tools jar to generate code as follows:
+
+```shell
+java -jar /path/to/avro-tools-1.11.4.jar compile schema
+```
+
+This will generate the appropriate source files in a package based on the schema's namespace in the provided destination folder. For instance, to generate a User class in package example.avro from the schema defined above, run
+
+```shell
+java -jar /path/to/avro-tools-1.11.4.jar compile schema user.avsc .
+```
+
+Note that if you are using the Avro Maven plugin, there is no need to manually invoke the schema compiler; the plugin automatically performs code generation on any .avsc files present in the configured source directory.
+
+### Creating Users
+Now that we've completed the code generation, let's create some Users, serialize them to a data file on disk, and then read back the file and deserialize the User objects.
+
+First let's create some Users and set their fields.
+
+```java
+User user1 = new User();
+user1.setName("Alyssa");
+user1.setFavoriteNumber(256);
+// Leave favorite color null
+
+// Alternate constructor
+User user2 = new User("Ben", 7, "red");
+
+// Construct via builder
+User user3 = User.newBuilder()
+ .setName("Charlie")
+ .setFavoriteColor("blue")
+ .setFavoriteNumber(null)
+ .build();
+```
+
+As shown in this example, Avro objects can be created either by invoking a constructor directly or by using a builder. Unlike constructors, builders will automatically set any default values specified in the schema. Additionally, builders validate the data as it is set, whereas objects constructed directly will not cause an error until the object is serialized. However, using constructors directly generally offers better performance, as builders create a copy of the data structure before it is written.
+
+Note that we do not set user1's favorite color. Since that field is of type ["string", "null"], we can either set it to a string or leave it null; it is essentially optional. Similarly, we set user3's favorite number to null (using a builder requires setting all fields, even if they are null).
+
+### Serializing
+Now let's serialize our Users to disk.
+
+```java
+// Serialize user1, user2 and user3 to disk
+DatumWriter<User> userDatumWriter = new SpecificDatumWriter<User>(User.class);
+DataFileWriter<User> dataFileWriter = new DataFileWriter<User>(userDatumWriter);
+dataFileWriter.create(user1.getSchema(), new File("users.avro"));
+dataFileWriter.append(user1);
+dataFileWriter.append(user2);
+dataFileWriter.append(user3);
+dataFileWriter.close();
+```
+
+We create a DatumWriter, which converts Java objects into an in-memory serialized format. The SpecificDatumWriter class is used with generated classes and extracts the schema from the specified generated type.
+
+Next we create a DataFileWriter, which writes the serialized records, as well as the schema, to the file specified in the dataFileWriter.create call. We write our users to the file via calls to the dataFileWriter.append method. When we are done writing, we close the data file.
+
+### Deserializing
+Finally, let's deserialize the data file we just created.
+
+```java
+// Deserialize Users from disk
+File file = new File("users.avro");
+DatumReader<User> userDatumReader = new SpecificDatumReader<User>(User.class);
+DataFileReader<User> dataFileReader = new DataFileReader<User>(file, userDatumReader);
+User user = null;
+while (dataFileReader.hasNext()) {
+  // Reuse user object by passing it to next(). This saves us from
+  // allocating and garbage collecting many objects for files with
+  // many items.
+  user = dataFileReader.next(user);
+  System.out.println(user);
+}
+```
+
+This snippet will output:
+
+```json
+{"name": "Alyssa", "favorite_number": 256, "favorite_color": null}
+{"name": "Ben", "favorite_number": 7, "favorite_color": "red"}
+{"name": "Charlie", "favorite_number": null, "favorite_color": "blue"}
+```
+
+Deserializing is very similar to serializing. We create a SpecificDatumReader, analogous to the SpecificDatumWriter we used in serialization, which converts in-memory serialized items into instances of our generated class, in this case User. We pass the DatumReader and the previously created File to a DataFileReader, analogous to the DataFileWriter, which reads both the schema used by the writer as well as the data from the file on disk. The data will be read using the writer's schema included in the file and the schema provided by the reader, in this case the User class. The writer's schema is needed to know the order in which fields were written, while the reader's schema is needed to know what fields are expected and how to fill in default values for fields added since the file was written. If there are differences between the two schemas, they are resolved according to the Schema Resolution specification.
+
+Next we use the DataFileReader to iterate through the serialized Users and print the deserialized object to stdout. Note how we perform the iteration: we create a single User object which we store the current deserialized user in, and pass this record object to every call of dataFileReader.next. This is a performance optimization that allows the DataFileReader to reuse the same User object rather than allocating a new User for every iteration, which can be very expensive in terms of object allocation and garbage collection if we deserialize a large data file. While this technique is the standard way to iterate through a data file, it's also possible to use for (User user : dataFileReader) if performance is not a concern.
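+
+For reference, here is a minimal sketch of that simpler for-each style (assuming the same users.avro file and generated User class as above); note that each iteration allocates a fresh User:
+
+```java
+DatumReader<User> userDatumReader = new SpecificDatumReader<User>(User.class);
+try (DataFileReader<User> dataFileReader =
+    new DataFileReader<User>(new File("users.avro"), userDatumReader)) {
+  for (User user : dataFileReader) {
+    // A new User instance is allocated for every record
+    System.out.println(user);
+  }
+}
+```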
+
+### Compiling and running the example code
+This example code is included as a Maven project in the examples/java-example directory in the Avro docs. From this directory, execute the following commands to build and run the example:
+
+```shell
+$ mvn compile # includes code generation via Avro Maven plugin
+$ mvn -q exec:java -Dexec.mainClass=example.SpecificMain
+```
+
+### Beta feature: Generating faster code
+In release 1.9.0, we introduced a new approach to generating code that speeds up decoding of objects by more than 10% and encoding by more than 30% (future performance enhancements are underway). To ensure a smooth introduction of this change into production systems, this feature is controlled by a feature flag, the system property org.apache.avro.specific.use_custom_coders. In this first release, this feature is off by default. To turn it on, set the system flag to true at runtime. In the sample above, for example, you could enable the faster coders as follows:
+
+```shell
+$ mvn -q exec:java -Dexec.mainClass=example.SpecificMain \
+  -Dorg.apache.avro.specific.use_custom_coders=true
+```
+
+Note that you do not have to recompile your Avro schema to have access to this feature. The feature is compiled and built into your code, and you turn it on and off at runtime using the feature flag. As a result, you can turn it on during testing, for example, and then off in production. Or you can turn it on in production, and quickly turn it off if something breaks.
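+
+If you prefer to set the flag from code rather than on the command line, a minimal sketch could look like the following (the wrapper class and its name are hypothetical, not part of the example project):
+
+```java
+public final class FastCodersLauncher {
+  public static void main(String[] args) throws Exception {
+    // Hypothetical wrapper: set the flag before any generated Avro
+    // classes are used, then delegate to the example's main class.
+    System.setProperty("org.apache.avro.specific.use_custom_coders", "true");
+    example.SpecificMain.main(args);
+  }
+}
+```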
+
+We encourage the Avro community to exercise this new feature early to help build confidence. (For those paying on-demand for compute resources in the cloud, it can lead to meaningful cost savings.) As confidence builds, we will turn this feature on by default, and eventually eliminate the feature flag (and the old code).
+
+## Serializing and deserializing without code generation
+Data in Avro is always stored with its corresponding schema, meaning we can always read a serialized item regardless of whether we know the schema ahead of time. This allows us to perform serialization and deserialization without code generation.
+
+Let's go over the same example as in the previous section, but without using code generation: we'll create some users, serialize them to a data file on disk, and then read back the file and deserialize the user objects.
+
+### Creating users
+First, we use a Parser to read our schema definition and create a Schema object.
+
+```java
+Schema schema = new Schema.Parser().parse(new File("user.avsc"));
+```
+
+Using this schema, let's create some users.
+
+```java
+GenericRecord user1 = new GenericData.Record(schema);
+user1.put("name", "Alyssa");
+user1.put("favorite_number", 256);
+// Leave favorite color null
+
+GenericRecord user2 = new GenericData.Record(schema);
+user2.put("name", "Ben");
+user2.put("favorite_number", 7);
+user2.put("favorite_color", "red");
+```
+
+Since we're not using code generation, we use GenericRecords to represent users. GenericRecord uses the schema to verify that we only specify valid fields. If we try to set a non-existent field (e.g., user1.put("favorite_animal", "cat")), we'll get an AvroRuntimeException when we run the program.
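+
+For instance, a small sketch of that failure mode (the favorite_animal field is deliberately absent from user.avsc):
+
+```java
+try {
+  user1.put("favorite_animal", "cat"); // no such field in the schema
+} catch (AvroRuntimeException e) {
+  // GenericData.Record rejects field names that are not in the schema
+  System.err.println("Invalid field: " + e.getMessage());
+}
+```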
+
+Note that we do not set user1's favorite color. Since that field is of type ["string", "null"], we can either set it to a string or leave it null; it is essentially optional.
+
+### Serializing
+Now that we've created our user objects, serializing and deserializing them is almost identical to the example above which uses code generation. The main difference is that we use generic instead of specific readers and writers.
+
+First we'll serialize our users to a data file on disk.
+
+```java
+// Serialize user1 and user2 to disk
+File file = new File("users.avro");
+DatumWriter<GenericRecord> datumWriter = new GenericDatumWriter<GenericRecord>(schema);
+DataFileWriter<GenericRecord> dataFileWriter = new DataFileWriter<GenericRecord>(datumWriter);
+dataFileWriter.create(schema, file);
+dataFileWriter.append(user1);
+dataFileWriter.append(user2);
+dataFileWriter.close();
+```
+
+We create a DatumWriter, which converts Java objects into an in-memory serialized format. Since we are not using code generation, we create a GenericDatumWriter. It requires the schema both to determine how to write the GenericRecords and to verify that all non-nullable fields are present.
+
+As in the code generation example, we also create a DataFileWriter, which writes the serialized records, as well as the schema, to the file specified in the dataFileWriter.create call. We write our users to the file via calls to the dataFileWriter.append method. When we are done writing, we close the data file.
+
+### Deserializing
+Finally, we'll deserialize the data file we just created.
+
+```java
+// Deserialize users from disk
+DatumReader<GenericRecord> datumReader = new GenericDatumReader<GenericRecord>(schema);
+DataFileReader<GenericRecord> dataFileReader = new DataFileReader<GenericRecord>(file, datumReader);
+GenericRecord user = null;
+while (dataFileReader.hasNext()) {
+  // Reuse user object by passing it to next(). This saves us from
+  // allocating and garbage collecting many objects for files with
+  // many items.
+  user = dataFileReader.next(user);
+  System.out.println(user);
+}
+```
+
+This outputs:
+
+```json
+{"name": "Alyssa", "favorite_number": 256, "favorite_color": null}
+{"name": "Ben", "favorite_number": 7, "favorite_color": "red"}
+```
+
+Deserializing is very similar to serializing. We create a GenericDatumReader, analogous to the GenericDatumWriter we used in serialization, which converts in-memory serialized items into GenericRecords. We pass the DatumReader and the previously created File to a DataFileReader, analogous to the DataFileWriter, which reads both the schema used by the writer as well as the data from the file on disk. The data will be read using the writer's schema included in the file, and the reader's schema provided to the GenericDatumReader. The writer's schema is needed to know the order in which fields were written, while the reader's schema is needed to know what fields are expected and how to fill in default values for fields added since the file was written. If there are differences between the two schemas, they are resolved according to the Schema Resolution specification.
+
+Next, we use the DataFileReader to iterate through the serialized users and print the deserialized object to stdout. Note how we perform the iteration: we create a single GenericRecord object which we store the current deserialized user in, and pass this record object to every call of dataFileReader.next. This is a performance optimization that allows the DataFileReader to reuse the same record object rather than allocating a new GenericRecord for every iteration, which can be very expensive in terms of object allocation and garbage collection if we deserialize a large data file. While this technique is the standard way to iterate through a data file, it's also possible to use for (GenericRecord user : dataFileReader) if performance is not a concern.
+
+### Compiling and running the example code
+This example code is included as a Maven project in the examples/java-example directory in the Avro docs. From this directory, execute the following commands to build and run the example:
+
+```shell
+$ mvn compile
+$ mvn -q exec:java -Dexec.mainClass=example.GenericMain
+```
diff --git a/doc/content/en/docs/1.11.4/Getting started (Python)/_index.md b/doc/content/en/docs/1.11.4/Getting started (Python)/_index.md
new file mode 100644
index 00000000000..d4e40a6146d
--- /dev/null
+++ b/doc/content/en/docs/1.11.4/Getting started (Python)/_index.md
@@ -0,0 +1,147 @@
+---
+categories: []
+tags: ["python"]
+title: "Getting Started (Python)"
+linkTitle: "Getting Started (Python)"
+weight: 3
+---
+
+
+
+This is a short guide for getting started with Apache Avro™ using Python. This guide only covers using Avro for data serialization; see Patrick Hunt's [Avro RPC Quick Start](https://github.com/phunt/avro-rpc-quickstart) for a good introduction to using Avro for RPC.
+
+## Notice for Python 3 users
+A package called "avro-python3" was previously provided to support Python 3, but the codebase has since been consolidated into the "avro" package, which now supports both Python 2 and 3. The avro-python3 package will be removed in the near future, so users should use the "avro" package instead. They are mostly API compatible, but there are a few minor differences (e.g., function name capitalization, such as avro.schema.Parse vs avro.schema.parse).
+
+## Download
+For Python, the easiest way to get started is to install Avro from PyPI.
+
+```shell
+$ python3 -m pip install avro
+```
+
+The official releases of the Avro implementations for C, C++, C#, Java, PHP, Python, and Ruby can be downloaded from the Apache Avro™ Releases page. This guide uses Avro 1.11.4, the latest version at the time of writing. Download and unzip avro-1.11.4.tar.gz, and install via python setup.py (this will probably require root privileges). Ensure that you can import avro from a Python prompt.
+
+```shell
+$ tar xvf avro-1.11.4.tar.gz
+$ cd avro-1.11.4
+$ python setup.py install
+$ python
+>>> import avro # should not raise ImportError
+```
+
+Alternatively, you may build the Avro Python library from source. From the root of the Avro source tree, run the following commands:
+
+```shell
+$ cd lang/py/
+$ python3 -m pip install -e .
+$ python
+```
+
+## Defining a schema
+Avro schemas are defined using JSON. Schemas are composed of primitive types (null, boolean, int, long, float, double, bytes, and string) and complex types (record, enum, array, map, union, and fixed). You can learn more about Avro schemas and types from the specification, but for now let's start with a simple schema example, user.avsc:
+
+```json
+{"namespace": "example.avro",
+ "type": "record",
+ "name": "User",
+ "fields": [
+ {"name": "name", "type": "string"},
+ {"name": "favorite_number", "type": ["int", "null"]},
+ {"name": "favorite_color", "type": ["string", "null"]}
+ ]
+}
+```
+
+This schema defines a record representing a hypothetical user. (Note that a schema file can only contain a single schema definition.) At minimum, a record definition must include its type ("type": "record"), a name ("name": "User"), and fields, in this case name, favorite_number, and favorite_color. We also define a namespace ("namespace": "example.avro"), which together with the name attribute defines the "full name" of the schema (example.avro.User in this case).
+
+Fields are defined via an array of objects, each of which defines a name and type (other attributes are optional, see the record specification for more details). The type attribute of a field is another schema object, which can be either a primitive or complex type. For example, the name field of our User schema is the primitive type string, whereas the favorite_number and favorite_color fields are both unions, represented by JSON arrays. unions are a complex type that can be any of the types listed in the array; e.g., favorite_number can either be an int or null, essentially making it an optional field.
+
+## Serializing and deserializing without code generation
+Data in Avro is always stored with its corresponding schema, meaning we can always read a serialized item, regardless of whether we know the schema ahead of time. This allows us to perform serialization and deserialization without code generation. Note that the Avro Python library does not support code generation.
+
+Try running the following code snippet, which serializes two users to a data file on disk, and then reads back and deserializes the data file:
+
+```python
+import avro.schema
+from avro.datafile import DataFileReader, DataFileWriter
+from avro.io import DatumReader, DatumWriter
+
+schema = avro.schema.parse(open("user.avsc", "rb").read())
+
+writer = DataFileWriter(open("users.avro", "wb"), DatumWriter(), schema)
+writer.append({"name": "Alyssa", "favorite_number": 256})
+writer.append({"name": "Ben", "favorite_number": 7, "favorite_color": "red"})
+writer.close()
+
+reader = DataFileReader(open("users.avro", "rb"), DatumReader())
+for user in reader:
+ print(user)
+reader.close()
+```
+
+This outputs:
+
+```json
+{u'favorite_color': None, u'favorite_number': 256, u'name': u'Alyssa'}
+{u'favorite_color': u'red', u'favorite_number': 7, u'name': u'Ben'}
+```
+
+Do make sure that you open your files in binary mode (i.e., using the modes wb and rb respectively). Otherwise you might generate corrupt files due to automatic replacement of newline characters with platform-specific representations.
+
+Let's take a closer look at what's going on here.
+
+```python
+schema = avro.schema.parse(open("user.avsc", "rb").read())
+```
+
+avro.schema.parse takes a string containing a JSON schema definition as input and outputs an avro.schema.Schema object (specifically a subclass of Schema, in this case RecordSchema). We're passing in the contents of our user.avsc schema file here.
+
+```python
+writer = DataFileWriter(open("users.avro", "wb"), DatumWriter(), schema)
+```
+
+We create a DataFileWriter, which we'll use to write serialized items to a data file on disk. The DataFileWriter constructor takes three arguments:
+
+* The file we'll serialize to
+* A DatumWriter, which is responsible for actually serializing the items to Avro's binary format (DatumWriters can be used separately from DataFileWriters, e.g., to perform IPC with Avro).
+* The schema we're using. The DataFileWriter needs the schema both to write the schema to the data file, and to verify that the items we write are valid items and write the appropriate fields.
+
+```python
+writer.append({"name": "Alyssa", "favorite_number": 256})
+writer.append({"name": "Ben", "favorite_number": 7, "favorite_color": "red"})
+```
+
+We use DataFileWriter.append to add items to our data file. Avro records are represented as Python dicts. Since the field favorite_color has type ["string", "null"], we are not required to specify this field, as shown in the first append. Were we to omit the required name field, an exception would be raised. Any extra entries in the dict that do not correspond to a schema field are ignored.
+
+```python
+reader = DataFileReader(open("users.avro", "rb"), DatumReader())
+```
+
+We open the file again, this time for reading back from disk. We use a DataFileReader and DatumReader analogous to the DataFileWriter and DatumWriter above.
+
+```python
+for user in reader:
+ print(user)
+```
+
+The DataFileReader is an iterator that returns dicts corresponding to the serialized items.
diff --git a/doc/content/en/docs/1.11.4/IDL Language/_index.md b/doc/content/en/docs/1.11.4/IDL Language/_index.md
new file mode 100644
index 00000000000..f50b0a489be
--- /dev/null
+++ b/doc/content/en/docs/1.11.4/IDL Language/_index.md
@@ -0,0 +1,435 @@
+---
+title: "IDL Language"
+linkTitle: "IDL Language"
+weight: 201
+---
+
+
+
+## Introduction
+This document defines Avro IDL, a higher-level language for authoring Avro schemata. Before reading this document, you should have familiarity with the concepts of schemata and protocols, as well as the various primitive and complex types available in Avro.
+
+## Overview
+
+### Purpose
+The aim of the Avro IDL language is to enable developers to author schemata in a way that feels more similar to common programming languages like Java, C++, or Python. Additionally, the Avro IDL language may feel more familiar for those users who have previously used the interface description languages (IDLs) in other frameworks like Thrift, Protocol Buffers, or CORBA.
+
+### Usage
+Each Avro IDL file defines a single Avro Protocol, and thus generates as its output a JSON-format Avro Protocol file with extension .avpr.
+
+To convert a _.avdl_ file into a _.avpr_ file, it may be processed by the `idl` tool. For example:
+```shell
+$ java -jar avro-tools.jar idl src/test/idl/input/namespaces.avdl /tmp/namespaces.avpr
+$ head /tmp/namespaces.avpr
+{
+ "protocol" : "TestNamespace",
+ "namespace" : "avro.test.protocol",
+```
+The `idl` tool can also process input to and from _stdin_ and _stdout_. See `idl --help` for full usage information.
+
+A Maven plugin is also provided to compile .avdl files. To use it, add something like the following to your pom.xml:
+```xml
+<plugin>
+  <groupId>org.apache.avro</groupId>
+  <artifactId>avro-maven-plugin</artifactId>
+  <executions>
+    <execution>
+      <goals>
+        <goal>idl-protocol</goal>
+      </goals>
+    </execution>
+  </executions>
+</plugin>
+```
+
+## Defining a Protocol in Avro IDL
+An Avro IDL file consists of exactly one protocol definition. The minimal protocol is defined by the following code:
+```java
+protocol MyProtocol {
+}
+```
+This is equivalent to (and generates) the following JSON protocol definition:
+```json
+{
+"protocol" : "MyProtocol",
+ "types" : [ ],
+ "messages" : {
+ }
+}
+```
+The namespace of the protocol may be changed using the @namespace annotation:
+```java
+@namespace("mynamespace")
+protocol MyProtocol {
+}
+```
+This notation is used throughout Avro IDL as a way of specifying properties for the annotated element, as will be described later in this document.
+
+Protocols in Avro IDL can contain the following items:
+
+* Imports of external protocol and schema files.
+* Definitions of named schemata, including records, errors, enums, and fixeds.
+* Definitions of RPC messages
+
+## Imports
+Files may be imported in one of three formats:
+
+* An IDL file may be imported with a statement like:
+
+ `import idl "foo.avdl";`
+
+* A JSON protocol file may be imported with a statement like:
+
+ `import protocol "foo.avpr";`
+
+* A JSON schema file may be imported with a statement like:
+
+ `import schema "foo.avsc";`
+
+Messages and types in the imported file are added to this file's protocol.
+
+Imported file names are resolved relative to the current IDL file.
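+
+For example (the file names here are hypothetical), all three forms may appear inside a protocol:
+
+```java
+@namespace("org.example")
+protocol ImportDemo {
+  import idl "common.avdl"; // paths resolve relative to this file
+  import protocol "legacy.avpr";
+  import schema "user.avsc";
+}
+```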
+
+## Defining an Enumeration
+Enums are defined in Avro IDL using a syntax similar to C or Java. An Avro Enum supports optional default values. In the case that a reader schema is unable to recognize a symbol written by the writer, the reader will fall back to using the defined default value. This default is only used when an incompatible symbol is read. It is not used if the enum field is missing.
+
+Example Writer Enum Definition
+```java
+enum Shapes {
+ SQUARE, TRIANGLE, CIRCLE, OVAL
+}
+```
+Example Reader Enum Definition
+```java
+enum Shapes {
+ SQUARE, TRIANGLE, CIRCLE
+} = CIRCLE;
+```
+In the above example, the reader will use the default value of `CIRCLE` whenever reading data written with the `OVAL` symbol of the writer. Also note that, unlike the JSON format, anonymous enums cannot be defined.
+
+## Defining a Fixed Length Field
+Fixed fields are defined using the following syntax:
+```
+fixed MD5(16);
+```
+This example defines a fixed-length type called MD5 which contains 16 bytes.
+
+## Defining Records and Errors
+Records are defined in Avro IDL using a syntax similar to a struct definition in C:
+```java
+record Employee {
+ string name;
+ boolean active = true;
+ long salary;
+}
+```
+The above example defines a record with the name “Employee” with three fields.
+
+To define an error, simply use the keyword _error_ instead of _record_. For example:
+```java
+error Kaboom {
+ string explanation;
+ int result_code = -1;
+}
+```
+Each field in a record or error consists of a type and a name, optional property annotations and an optional default value.
+
+A type reference in Avro IDL must be one of:
+
+* A primitive type
+* A logical type
+* A named schema defined prior to this usage in the same Protocol
+* A complex type (array, map, or union)
+
+### Primitive Types
+The primitive types supported by Avro IDL are the same as those supported by Avro's JSON format. This list includes _int_, _long_, _string_, _boolean_, _float_, _double_, _null_, and _bytes_.
+
+### Logical Types
+Some of the logical types supported by Avro's JSON format are also supported by Avro IDL. The currently supported types are:
+
+* _decimal_ (logical type [decimal]({{< relref "../specification#decimal" >}}))
+* _date_ (logical type [date]({{< relref "../specification#date" >}}))
+* _time_ms_ (logical type [time-millis]({{< relref "../specification#time-millisecond-precision" >}}))
+* _timestamp_ms_ (logical type [timestamp-millis]({{< relref "../specification#timestamp-millisecond-precision" >}}))
+* _uuid_ (logical type [uuid]({{< relref "../specification#uuid" >}}))
+
+For example:
+```java
+record Job {
+ string jobid;
+ date submitDate;
+ time_ms submitTime;
+ timestamp_ms finishTime;
+ decimal(9,2) finishRatio;
+ uuid pk = "a1a2a3a4-b1b2-c1c2-d1d2-d3d4d5d6d7d8";
+}
+```
+
+Logical types can also be specified via an annotation, which is useful for logical types for which a keyword does not exist:
+
+```java
+record Job {
+ string jobid;
+ @logicalType("timestamp-micros")
+ long finishTime;
+}
+```
+
+### References to Named Schemata
+If a named schema has already been defined in the same Avro IDL file, it may be referenced by name as if it were a primitive type:
+```java
+record Card {
+ Suit suit; // refers to the enum Suit defined above
+ int number;
+}
+```
+
+### Default Values
+Default values for fields may be optionally specified by using an equals sign after the field name followed by a JSON expression indicating the default value. This JSON is interpreted as described in the [spec]({{< relref "../specification#schema-record" >}}).
+
+### Complex Types
+
+#### Arrays
+Array types are written in a manner that will seem familiar to C++ or Java programmers. An array of any type t is denoted `array<t>`. For example, an array of strings is denoted `array<string>`, and a multidimensional array of Foo records would be `array<array<Foo>>`.
+
+#### Maps
+Map types are written similarly to array types. A map that contains values of type t is written `map<t>`. As in the JSON schema format, all maps contain `string`-type keys.
+
+#### Unions
+Union types are denoted as `union { typeA, typeB, typeC, ... }`. For example, this record contains a string field that is optional (unioned with null), and a field containing either a precise or an imprecise number:
+```java
+record RecordWithUnion {
+ union { null, string } optionalString;
+ union { decimal(12, 6), float } number;
+}
+```
+Note that the same restrictions apply to Avro IDL unions as apply to unions defined in the JSON format; namely, a record may not contain multiple elements of the same type. Also, fields/parameters that use the union type and have a default parameter must specify a default value of the same type as the **first** union type.
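+
+As a small illustration of that rule (the record and field names here are made up), the branch order determines which default values are legal:
+
+```java
+record UnionDefaults {
+  // Legal: null matches the first branch of the union
+  union { null, string } note = null;
+  // Legal: to default to a string, the string branch must come first
+  union { string, null } label = "unnamed";
+}
+```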
+
+Because it occurs so often, there is a special shorthand to denote a union of `null` with another type. In the following snippet, the first three fields have identical types:
+
+```java
+record RecordWithUnion {
+ union { null, string } optionalString1 = null;
+ string? optionalString2 = null;
+ string? optionalString3; // No default value
+ string? optionalString4 = "something";
+}
+```
+
+Note that unlike explicit unions, the position of the `null` type is fluid; it will be the first or last type depending on the default value (if any). So in the example above, all fields are valid.
+
+## Defining RPC Messages
+The syntax to define an RPC message within an Avro IDL protocol is similar to the syntax for a method declaration within a C header file or a Java interface. To define an RPC message _add_, which takes two arguments named _foo_ and _bar_ and returns an _int_, simply include the following definition within the protocol:
+```java
+int add(int foo, int bar = 0);
+```
+Message arguments, like record fields, may specify default values.
+
+To define a message with no response, you may use the alias _void_, equivalent to the Avro _null_ type:
+```java
+void logMessage(string message);
+```
+If you have previously defined an error type within the same protocol, you may declare that a message can throw this error using the syntax:
+```java
+void goKaboom() throws Kaboom;
+```
+To define a one-way message, use the keyword `oneway` after the parameter list, for example:
+```java
+void fireAndForget(string message) oneway;
+```
+
+## Other Language Features
+
+### Comments
+All Java-style comments are supported within an Avro IDL file. Any text following _//_ on a line is ignored, as is any text between _/*_ and _*/_, possibly spanning multiple lines.
+
+Comments that begin with _/**_ are used as the documentation string for the type or field definition that follows the comment.
+
+### Escaping Identifiers
+Occasionally, one will need to use a reserved language keyword as an identifier. In order to do so, backticks (`) may be used to escape the identifier. For example, to define a message with the literal name error, you may write:
+```java
+void `error`();
+```
+This syntax is allowed anywhere an identifier is expected.
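+
+For instance (a sketch), escaping works for type and field names as well:
+
+```java
+record `error` {
+  string `string`;
+}
+```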
+
+### Annotations for Ordering and Namespaces
+Java-style annotations may be used to add additional properties to types and fields throughout Avro IDL.
+
+For example, to specify the sort order of a field within a record, one may use the `@order` annotation before the field name as follows:
+```java
+record MyRecord {
+ string @order("ascending") myAscendingSortField;
+ string @order("descending") myDescendingField;
+ string @order("ignore") myIgnoredField;
+}
+```
+A field's type (with the exception of type references) may also be preceded by annotations, e.g.:
+```java
+record MyRecord {
+ @java-class("java.util.ArrayList") array<string> myStrings;
+}
+```
+This can be used to support Java classes that can be serialized/deserialized via their `toString` method and `String` constructor, e.g.:
+```java
+record MyRecord {
+ @java-class("java.math.BigDecimal") string value;
+ @java-key-class("java.io.File") map<string> fileStates;
+ array<@java-class("java.math.BigDecimal") string> weights;
+}
+```
+Similarly, a `@namespace` annotation may be used to modify the namespace when defining a named schema. For example:
+```java
+@namespace("org.apache.avro.firstNamespace")
+protocol MyProto {
+ @namespace("org.apache.avro.someOtherNamespace")
+ record Foo {}
+
+ record Bar {}
+}
+```
+will define a protocol in the _firstNamespace_ namespace. The record _Foo_ will be defined in _someOtherNamespace_ and _Bar_ will be defined in _firstNamespace_ as it inherits its default from its container.
+
+Type and field aliases are specified with the `@aliases` annotation as follows:
+```java
+@aliases(["org.old.OldRecord", "org.ancient.AncientRecord"])
+record MyRecord {
+ string @aliases(["oldField", "ancientField"]) myNewField;
+}
+```
+Some annotations like those listed above are handled specially. All other annotations are added as properties to the protocol, message, schema or field.
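+
+For example, a sketch with made-up property names (neither @version nor @deprecated is handled specially, so both simply become properties):
+
+```java
+@version("1.0")
+protocol Annotated {
+  record Data {
+    @deprecated("use id2 instead") string id;
+  }
+}
+```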
+
+## Complete Example
+The following is an example of an Avro IDL file that shows most of the above features:
+```java
+/*
+* Header with license information.
+*/
+
+/**
+ * An example protocol in Avro IDL
+ */
+@namespace("org.apache.avro.test")
+protocol Simple {
+ /** Documentation for the enum type Kind */
+ @aliases(["org.foo.KindOf"])
+ enum Kind {
+ FOO,
+ BAR, // the bar enum value
+ BAZ
+ } = FOO; // For schema evolution purposes, unmatched values do not throw an error, but are resolved to FOO.
+
+ /** MD5 hash; good enough to avoid most collisions, and smaller than (for example) SHA256. */
+ fixed MD5(16);
+
+ record TestRecord {
+ /** Record name; has no intrinsic order */
+ string @order("ignore") name;
+
+ Kind @order("descending") kind;
+
+ MD5 hash;
+
+ /*
+ Note that 'null' is the first union type. Just like .avsc / .avpr files, the default value must be of the first union type.
+ */
+ union { null, MD5 } /** Optional field */ @aliases(["hash"]) nullableHash = null;
+
+ array<long> arrayOfLongs;
+ }
+
+ /** Errors are records that can be thrown from a method */
+ error TestError {
+ string message;
+ }
+
+ string hello(string greeting);
+ /** Return what was given. Demonstrates the use of backticks to name types/fields/messages/parameters after keywords */
+ TestRecord echo(TestRecord `record`);
+ int add(int arg1, int arg2);
+ bytes echoBytes(bytes data);
+ void `error`() throws TestError;
+ // The oneway keyword forces the method to return null.
+ void ping() oneway;
+}
+```
+Additional examples may be found in the Avro source tree under the `src/test/idl/input` directory.
+
+## IDE support
+
+There are several editors and IDEs that support Avro IDL files, usually via plugins.
+
+### JetBrains
+
+Apache Avro IDL Schema Support 203.1.2 was released on 9 December 2021.
+
+Features:
+* Syntax Highlighting
+* Code Completion
+* Code Formatting
+* Error Highlighting
+* Inspections & quick fixes
+* JSON schemas for .avpr and .avsc files
+
+It's available via the [JetBrains Marketplace](https://plugins.jetbrains.com/plugin/15728-apache-avro-idl-schema-support)
+and on [GitHub](https://github.com/opwvhk/avro-schema-support).
+
+The plugin supports almost all JetBrains products: IntelliJ IDEA, PyCharm, WebStorm, Android Studio, AppCode, GoLand, Rider, CLion, RubyMine, PhpStorm, DataGrip, DataSpell, MPS, Code With Me Guest and JetBrains Client.
+
+Only JetBrains Gateway does not support this plugin directly, but the backend (JetBrains) IDE that it connects to does.
+
+### Eclipse
+
+Avroclipse 0.0.11 was released on 4 December 2019.
+
+Features:
+* Syntax Highlighting
+* Error Highlighting
+* Code Completion
+
+It is available on the [Eclipse Marketplace](https://marketplace.eclipse.org/content/avroclipse)
+and [GitHub](https://github.com/dvdkruk/avroclipse).
+
+### Visual Studio Code
+
+avro-idl 0.5.0 was released on 16 June 2021. It provides syntax highlighting.
+
+It is available on the [VisualStudio Marketplace](https://marketplace.visualstudio.com/items?itemName=streetsidesoftware.avro)
+and [GitHub](https://github.com/Jason3S/vscode-avro-ext)
+
+### Atom.io
+
+atom-language-avro 0.0.13 was released on 14 August 2015. It provides syntax highlighting.
+
+It is available as [Atom.io package](https://atom.io/packages/atom-language-avro)
+and [GitHub](https://github.com/jonesetc/atom-language-avro)
+
+### Vim
+
+A `.avdl`-detecting plugin by Gurpreet Atwal on [GitHub](https://github.com/gurpreetatwal/vim-avro) (last change in December 2016)
+
+[avro-idl.vim](https://github.com/apache/avro/blob/master/share/editors/avro-idl.vim) in the Avro repository `share/editors` directory (last change in September 2010)
+
+Both provide syntax highlighting.
diff --git a/doc/content/en/docs/1.11.4/MapReduce guide/_index.md b/doc/content/en/docs/1.11.4/MapReduce guide/_index.md
new file mode 100644
index 00000000000..2540ff82204
--- /dev/null
+++ b/doc/content/en/docs/1.11.4/MapReduce guide/_index.md
@@ -0,0 +1,396 @@
+---
+title: "MapReduce guide"
+linkTitle: "MapReduce guide"
+weight: 200
+---
+
+
+
+Avro provides a convenient way to represent complex data structures within a Hadoop MapReduce job. Avro data can be used as both input to and output from a MapReduce job, as well as the intermediate format. The example in this guide uses Avro data for all three, but it's possible to mix and match; for instance, MapReduce can be used to aggregate a particular field in an Avro record.
+
+This guide assumes basic familiarity with both Hadoop MapReduce and Avro. See the [Hadoop documentation](https://hadoop.apache.org/docs/current/) and the [Avro getting started guide](./getting-started-java/) for introductions to these projects. This guide uses the old MapReduce API (`org.apache.hadoop.mapred`) and the new MapReduce API (`org.apache.hadoop.mapreduce`).
+
+## Setup
+The code from this guide is included in the Avro docs under examples/mr-example. The example is set up as a Maven project that includes the necessary Avro and MapReduce dependencies and the Avro Maven plugin for code generation, so no external jars are needed to run the example. In particular, the POM includes the following dependencies:
+```xml
+<dependency>
+  <groupId>org.apache.avro</groupId>
+  <artifactId>avro</artifactId>
+  <version>1.11.4</version>
+</dependency>
+<dependency>
+  <groupId>org.apache.avro</groupId>
+  <artifactId>avro-mapred</artifactId>
+  <version>1.11.4</version>
+</dependency>
+<dependency>
+  <groupId>org.apache.hadoop</groupId>
+  <artifactId>hadoop-client</artifactId>
+  <version>3.1.2</version>
+</dependency>
+```
+And the following plugin:
+```xml
+<plugin>
+  <groupId>org.apache.avro</groupId>
+  <artifactId>avro-maven-plugin</artifactId>
+  <version>1.11.4</version>
+  <executions>
+    <execution>
+      <phase>generate-sources</phase>
+      <goals>
+        <goal>schema</goal>
+      </goals>
+      <configuration>
+        <sourceDirectory>${project.basedir}/../</sourceDirectory>
+        <outputDirectory>${project.basedir}/target/generated-sources/</outputDirectory>
+      </configuration>
+    </execution>
+  </executions>
+</plugin>
+```
+
+If you do not configure the *sourceDirectory* and *outputDirectory* properties, the defaults will be used. The *sourceDirectory* property defaults to *src/main/avro*. The *outputDirectory* property defaults to *target/generated-sources*. You can change the paths to match your project layout.
+
+Alternatively, Avro jars can be downloaded directly from the Apache Avro™ Releases [page](https://avro.apache.org/releases.html). The relevant Avro jars for this guide are *avro-1.11.4.jar* and *avro-mapred-1.11.4.jar*, as well as *avro-tools-1.11.4.jar* for code generation and viewing Avro data files as JSON. In addition, you will need to install Hadoop in order to use MapReduce.
+
+## Example: ColorCount
+Below is a simple example of a MapReduce job that uses Avro. There is an example for both the old (org.apache.hadoop.mapred) and new (org.apache.hadoop.mapreduce) APIs under *examples/mr-example/src/main/java/example/*. _MapredColorCount_ is the example for the older mapred API while _MapReduceColorCount_ is the example for the newer mapreduce API. Both examples are below, but we will detail the mapred API in our subsequent examples.
+
+MapredColorCount.java:
+```java
+package example;
+
+import java.io.IOException;
+
+import org.apache.avro.*;
+import org.apache.avro.Schema.Type;
+import org.apache.avro.mapred.*;
+import org.apache.hadoop.conf.*;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.*;
+import org.apache.hadoop.util.*;
+
+import example.avro.User;
+
+public class MapredColorCount extends Configured implements Tool {
+
+  public static class ColorCountMapper extends AvroMapper<User, Pair<CharSequence, Integer>> {
+    @Override
+    public void map(User user, AvroCollector<Pair<CharSequence, Integer>> collector, Reporter reporter)
+        throws IOException {
+      CharSequence color = user.getFavoriteColor();
+      // We need this check because the User.favorite_color field has type ["string", "null"]
+      if (color == null) {
+        color = "none";
+      }
+      collector.collect(new Pair<CharSequence, Integer>(color, 1));
+    }
+  }
+
+  public static class ColorCountReducer extends AvroReducer<CharSequence, Integer, Pair<CharSequence, Integer>> {
+    @Override
+    public void reduce(CharSequence key, Iterable<Integer> values,
+                       AvroCollector<Pair<CharSequence, Integer>> collector,
+                       Reporter reporter)
+        throws IOException {
+      int sum = 0;
+      for (Integer value : values) {
+        sum += value;
+      }
+      collector.collect(new Pair<CharSequence, Integer>(key, sum));
+    }
+  }
+
+  public int run(String[] args) throws Exception {
+    if (args.length != 2) {
+      System.err.println("Usage: MapredColorCount <input path> <output path>");