diff --git a/app/build.gradle.kts b/app/build.gradle.kts index a646447..3e486f8 100644 --- a/app/build.gradle.kts +++ b/app/build.gradle.kts @@ -125,7 +125,7 @@ dependencies { implementation("com.github.wendykierp:JTransforms:3.1") // https://mvnrepository.com/artifact/org.apache.commons/commons-math3 implementation("org.apache.commons:commons-math3:3.6.1") - implementation("be.tarsos.dsp:core:2.5") +// implementation("be.tarsos.dsp:core:2.5") // https://mvnrepository.com/artifact/org.bitbucket.ijabz/jaudiotagger implementation("org.bitbucket.ijabz:jaudiotagger:7b004a1") diff --git a/app/src/main/AndroidManifest.xml b/app/src/main/AndroidManifest.xml index 4f5efc2..cb3faa6 100644 --- a/app/src/main/AndroidManifest.xml +++ b/app/src/main/AndroidManifest.xml @@ -2,17 +2,16 @@ - - - - - + + @@ -27,8 +26,11 @@ android:roundIcon="@mipmap/ic_launcher_round" android:supportsRtl="true" android:theme="@style/Theme.MusicPitch" + android:name=".MyApplication" tools:targetApi="tiramisu"> - + + + @@ -48,6 +52,7 @@ + @@ -55,6 +60,7 @@ + - + \ No newline at end of file diff --git a/app/src/main/java/be/tarsos/dsp/AudioDispatcher.java b/app/src/main/java/be/tarsos/dsp/AudioDispatcher.java new file mode 100644 index 0000000..3517f55 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/AudioDispatcher.java @@ -0,0 +1,485 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +package be.tarsos.dsp; + +import java.io.IOException; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.logging.Level; +import java.util.logging.Logger; + +import be.tarsos.dsp.io.TarsosDSPAudioFloatConverter; +import be.tarsos.dsp.io.TarsosDSPAudioFormat; +import be.tarsos.dsp.io.TarsosDSPAudioInputStream; + + +/** + * This class plays a file and sends float arrays to registered AudioProcessor + * implementors. This class can be used to feed FFT's, pitch detectors, audio players, ... + * Using a (blocking) audio player it is even possible to synchronize execution of + * AudioProcessors and sound. This behavior can be used for visualization. + * @author Joren Six + */ +public class AudioDispatcher implements Runnable { + + + /** + * Log messages. + */ + private static final Logger LOG = Logger.getLogger(AudioDispatcher.class.getName()); + + /** + * The audio stream (in bytes), conversion to float happens at the last + * moment. + */ + private final TarsosDSPAudioInputStream audioInputStream; + + /** + * This buffer is reused again and again to store audio data using the float + * data type. + */ + private float[] audioFloatBuffer; + + /** + * This buffer is reused again and again to store audio data using the byte + * data type. + */ + private byte[] audioByteBuffer; + + /** + * A list of registered audio processors. 
The audio processors are + * responsible for actually doing the digital signal processing + */ + private final List audioProcessors; + + /** + * Converter converts an array of floats to an array of bytes (and vice + * versa). + */ + private final TarsosDSPAudioFloatConverter converter; + + private final TarsosDSPAudioFormat format; + + /** + * The floatOverlap: the number of elements that are copied in the buffer + * from the previous buffer. Overlap should be smaller (strict) than the + * buffer size and can be zero. Defined in number of samples. + */ + private int floatOverlap, floatStepSize; + + /** + * The overlap and stepsize defined not in samples but in bytes. So it + * depends on the bit depth. Since the int datatype is used only 8,16,24,... + * bits or 1,2,3,... bytes are supported. + */ + private int byteOverlap, byteStepSize; + + + /** + * The number of bytes to skip before processing starts. + */ + private long bytesToSkip; + + /** + * Position in the stream in bytes. e.g. if 44100 bytes are processed and 16 + * bits per frame are used then you are 0.5 seconds into the stream. + */ + private long bytesProcessed; + + + /** + * The audio event that is send through the processing chain. + */ + private final AudioEvent audioEvent; + + /** + * If true the dispatcher stops dispatching audio. + */ + private boolean stopped; + + /** + * If true then the first buffer is only filled up to buffer size - hop size + * E.g. if the buffer is 2048 and the hop size is 48 then you get 2000 times + * zero 0 and 48 actual audio samples. During the next iteration you get + * mostly zeros and 96 samples. + */ + private boolean zeroPadFirstBuffer; + + /** + * If true then the last buffer is zero padded. Otherwise the buffer is + * shortened to the remaining number of samples. If false then the audio + * processors must be prepared to handle shorter audio buffers. + */ + private boolean zeroPadLastBuffer; + + /** + * Create a new dispatcher from a stream. + * + * @param stream + * The stream to read data from. + * @param audioBufferSize + * The size of the buffer defines how much samples are processed + * in one step. Common values are 1024,2048. + * @param bufferOverlap + * How much consecutive buffers overlap (in samples). Half of the + * AudioBufferSize is common (512, 1024) for an FFT. + */ + public AudioDispatcher(final TarsosDSPAudioInputStream stream, final int audioBufferSize, final int bufferOverlap){ + // The copy on write list allows concurrent modification of the list while + // it is iterated. A nice feature to have when adding AudioProcessors while + // the AudioDispatcher is running. + audioProcessors = new CopyOnWriteArrayList(); + audioInputStream = stream; + + format = audioInputStream.getFormat(); + + + setStepSizeAndOverlap(audioBufferSize, bufferOverlap); + + audioEvent = new AudioEvent(format); + audioEvent.setFloatBuffer(audioFloatBuffer); + audioEvent.setOverlap(bufferOverlap); + + converter = TarsosDSPAudioFloatConverter.getConverter(format); + + stopped = false; + + bytesToSkip = 0; + + zeroPadLastBuffer = true; + } + + /** + * Skip a number of seconds before processing the stream. + * @param seconds The number of seconds to skip + */ + public void skip(double seconds){ + bytesToSkip = Math.round(seconds * format.getSampleRate()) * format.getFrameSize(); + } + + /** + * Set a new step size and overlap size. Both in number of samples. Watch + * out with this method: it should be called after a batch of samples is + * processed, not during. 
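+ * For example, an audio buffer size of 2048 samples combined with an overlap of
+ * 1024 samples results in a step (hop) size of 2048 - 1024 = 1024 samples per iteration.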
+ * + * @param audioBufferSize + * The size of the buffer defines how much samples are processed + * in one step. Common values are 1024,2048. + * @param bufferOverlap + * How much consecutive buffers overlap (in samples). Half of the + * AudioBufferSize is common (512, 1024) for an FFT. + */ + public void setStepSizeAndOverlap(final int audioBufferSize, final int bufferOverlap){ + audioFloatBuffer = new float[audioBufferSize]; + floatOverlap = bufferOverlap; + floatStepSize = audioFloatBuffer.length - floatOverlap; + + audioByteBuffer = new byte[audioFloatBuffer.length * format.getFrameSize()]; + byteOverlap = floatOverlap * format.getFrameSize(); + byteStepSize = floatStepSize * format.getFrameSize(); + } + + /** + * if zero pad is true then the first buffer is only filled up to buffer size - hop size + * E.g. if the buffer is 2048 and the hop size is 48 then you get 2000x0 and 48 filled audio samples + * @param zeroPadFirstBuffer true if the buffer should be zeroPadFirstBuffer, false otherwise. + */ + public void setZeroPadFirstBuffer(boolean zeroPadFirstBuffer){ + this.zeroPadFirstBuffer = zeroPadFirstBuffer; + } + + /** + * If zero pad last buffer is true then the last buffer is filled with zeros until the normal amount + * of elements are present in the buffer. Otherwise the buffer only contains the last elements and no zeros. + * By default it is set to true. + * + * @param zeroPadLastBuffer A boolean to control whether the last buffer is zero-padded. + */ + public void setZeroPadLastBuffer(boolean zeroPadLastBuffer) { + this.zeroPadLastBuffer = zeroPadLastBuffer; + } + + + /** + * Adds an AudioProcessor to the chain of processors. + * + * @param audioProcessor + * The AudioProcessor to add. + */ + public void addAudioProcessor(final AudioProcessor audioProcessor) { + audioProcessors.add(audioProcessor); + LOG.fine("Added an audioprocessor to the list of processors: " + audioProcessor.toString()); + } + + /** + * Removes an AudioProcessor to the chain of processors and calls its processingFinished method. + * + * @param audioProcessor + * The AudioProcessor to remove. + */ + public void removeAudioProcessor(final AudioProcessor audioProcessor) { + audioProcessors.remove(audioProcessor); + audioProcessor.processingFinished(); + LOG.fine("Remove an audioprocessor to the list of processors: " + audioProcessor); + } + + public void run() { + + int bytesRead = 0; + + if(bytesToSkip!=0){ + skipToStart(); + } + + //Read the first (and in some cases last) audio block. + try { + //needed to get correct time info when skipping first x seconds + audioEvent.setBytesProcessed(bytesProcessed); + bytesRead = readNextAudioBlock(); + } catch (IOException e) { + String message="Error while reading audio input stream: " + e.getMessage(); + LOG.warning(message); + throw new Error(message); + } + + // As long as the stream has not ended + while (bytesRead != 0 && !stopped) { + + //Makes sure the right buffers are processed, they can be changed by audio processors. + for (final AudioProcessor processor : audioProcessors) { + if(!processor.process(audioEvent)){ + //skip to the next audio processors if false is returned. + break; + } + } + + if(!stopped){ + //Update the number of bytes processed; + bytesProcessed += bytesRead; + audioEvent.setBytesProcessed(bytesProcessed); + + // Read, convert and process consecutive overlapping buffers. + // Slide the buffer. 
+ try { + bytesRead = readNextAudioBlock(); + audioEvent.setOverlap(floatOverlap); + } catch (IOException e) { + String message="Error while reading audio input stream: " + e.getMessage(); + LOG.warning(message); + throw new Error(message); + } + } + } + + // Notify all processors that no more data is available. + // when stop() is called processingFinished is called explicitly, no need to do this again. + // The explicit call is to prevent timing issues. + if(!stopped){ + stop(); + } + } + + + private void skipToStart() { + long skipped = 0L; + try{ + skipped = audioInputStream.skip(bytesToSkip); + if(skipped !=bytesToSkip){ + throw new IOException(); + } + bytesProcessed += bytesToSkip; + }catch(IOException e){ + String message=String.format("Did not skip the expected amount of bytes, %d skipped, %d expected!", skipped,bytesToSkip); + LOG.warning(message); + throw new Error(message); + } + } + + /** + * Stops dispatching audio data. + */ + public void stop() { + stopped = true; + for (final AudioProcessor processor : audioProcessors) { + processor.processingFinished(); + } + try { + audioInputStream.close(); + } catch (IOException e) { + LOG.log(Level.SEVERE, "Closing audio stream error.", e); + } + } + + /** + * Reads the next audio block. It tries to read the number of bytes defined + * by the audio buffer size minus the overlap. If the expected number of + * bytes could not be read either the end of the stream is reached or + * something went wrong. + * + * The behavior for the first and last buffer is defined by their corresponding the zero pad settings. The method also handles the case if + * the first buffer is also the last. + * + * @return The number of bytes read. + * @throws IOException + * When something goes wrong while reading the stream. In + * particular, an IOException is thrown if the input stream has + * been closed. + */ + private int readNextAudioBlock() throws IOException { + assert floatOverlap < audioFloatBuffer.length; + + // Is this the first buffer? + boolean isFirstBuffer = (bytesProcessed ==0 || bytesProcessed == bytesToSkip); + + final int offsetInBytes; + + final int offsetInSamples; + + final int bytesToRead; + //Determine the amount of bytes to read from the stream + if(isFirstBuffer && !zeroPadFirstBuffer){ + //If this is the first buffer and we do not want to zero pad the + //first buffer then read a full buffer + bytesToRead = audioByteBuffer.length; + // With an offset in bytes of zero; + offsetInBytes = 0; + offsetInSamples=0; + }else{ + //In all other cases read the amount of bytes defined by the step size + bytesToRead = byteStepSize; + offsetInBytes = byteOverlap; + offsetInSamples = floatOverlap; + } + + //Shift the audio information using array copy since it is probably faster than manually shifting it. + // No need to do this on the first buffer + if(!isFirstBuffer && audioFloatBuffer.length == floatOverlap + floatStepSize ){ + System.arraycopy(audioFloatBuffer,floatStepSize, audioFloatBuffer,0 ,floatOverlap); + /* + for(int i = floatStepSize ; i < floatStepSize+floatOverlap ; i++){ + audioFloatBuffer[i-floatStepSize] = audioFloatBuffer[i]; + }*/ + } + + // Total amount of bytes read + int totalBytesRead = 0; + + // The amount of bytes read from the stream during one iteration. + int bytesRead=0; + + // Is the end of the stream reached? + boolean endOfStream = false; + + // Always try to read the 'bytesToRead' amount of bytes. 
+ // unless the stream is closed (stopped is true) or no bytes could be read during one iteration + while(!stopped && !endOfStream && totalBytesReadRMS of + * the signal present in the current buffer. + */ + public double getRMS() { + return calculateRMS(floatBuffer); + } + + + /** + * Returns the dBSPL for a buffer. + * + * @return The dBSPL level for the buffer. + */ + public double getdBSPL() { + return soundPressureLevel(floatBuffer); + } + + /** + * Calculates and returns the root mean square of the signal. Please + * cache the result since it is calculated every time. + * @param floatBuffer The audio buffer to calculate the RMS for. + * @return The RMS of + * the signal present in the current buffer. + */ + public static double calculateRMS(float[] floatBuffer){ + double rms = 0.0; + for (int i = 0; i < floatBuffer.length; i++) { + rms += floatBuffer[i] * floatBuffer[i]; + } + rms = rms / Double.valueOf(floatBuffer.length); + rms = Math.sqrt(rms); + return rms; + } + + /** + * Set all sample values to zero. + */ + public void clearFloatBuffer() { + Arrays.fill(floatBuffer, 0); + } + + /** + * Returns the dBSPL for a buffer. + * + * @param buffer + * The buffer with audio information. + * @return The dBSPL level for the buffer. + */ + private static double soundPressureLevel(final float[] buffer) { + double rms = calculateRMS(buffer); + return linearToDecibel(rms); + } + + /** + * Converts a linear to a dB value. + * + * @param value + * The value to convert. + * @return The converted value. + */ + private static double linearToDecibel(final double value) { + return 20.0 * Math.log10(value); + } + + /** + * Checks whether this block of audio is silent + * @param silenceThreshold the threshold in spl to use. + * @return True if SPL is below the threshold. False otherwise. + */ + public boolean isSilence(double silenceThreshold) { + return soundPressureLevel(floatBuffer) < silenceThreshold; + } + + /** + * The number of bytes being processed. + * @param bytesProcessing Sets the number of bytes being processed. + */ + public void setBytesProcessing(int bytesProcessing) { + this.bytesProcessing = bytesProcessing; + + } + +} diff --git a/app/src/main/java/be/tarsos/dsp/AudioGenerator.java b/app/src/main/java/be/tarsos/dsp/AudioGenerator.java new file mode 100644 index 0000000..0794350 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/AudioGenerator.java @@ -0,0 +1,267 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +package be.tarsos.dsp; + +import java.nio.ByteOrder; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.logging.Logger; + +import be.tarsos.dsp.io.TarsosDSPAudioFormat; + + +/** + * This class plays a file and sends float arrays to registered AudioProcessor + * implementors. This class can be used to feed FFT's, pitch detectors, audio players, ... 
+ * Using a (blocking) audio player it is even possible to synchronize execution of + * AudioProcessors and sound. This behavior can be used for visualization. + * @author Joren Six + */ +public class AudioGenerator implements Runnable { + + + /** + * Log messages. + */ + private static final Logger LOG = Logger.getLogger(AudioGenerator.class.getName()); + + + /** + * This buffer is reused again and again to store audio data using the float + * data type. + */ + private float[] audioFloatBuffer; + + + /** + * A list of registered audio processors. The audio processors are + * responsible for actually doing the digital signal processing + */ + private final List audioProcessors; + + + private final TarsosDSPAudioFormat format; + + /** + * The floatOverlap: the number of elements that are copied in the buffer + * from the previous buffer. Overlap should be smaller (strict) than the + * buffer size and can be zero. Defined in number of samples. + */ + private int floatOverlap, floatStepSize; + + private int samplesProcessed; + + /** + * The audio event that is send through the processing chain. + */ + private final AudioEvent audioEvent; + + /** + * If true the dispatcher stops dispatching audio. + */ + private boolean stopped; + + + /** + * Create a new generator. + * @param audioBufferSize + * The size of the buffer defines how much samples are processed + * in one step. Common values are 1024,2048. + * @param bufferOverlap + * How much consecutive buffers overlap (in samples). Half of the + * AudioBufferSize is common (512, 1024) for an FFT. + */ + public AudioGenerator(final int audioBufferSize, final int bufferOverlap){ + + this(audioBufferSize,bufferOverlap,44100); + } + + public AudioGenerator(final int audioBufferSize, final int bufferOverlap,final int samplerate){ + + audioProcessors = new CopyOnWriteArrayList(); + + + format = getTargetAudioFormat(samplerate); + + + setStepSizeAndOverlap(audioBufferSize, bufferOverlap); + + audioEvent = new AudioEvent(format); + audioEvent.setFloatBuffer(audioFloatBuffer); + + stopped = false; + + + samplesProcessed = 0; + } + + /** + * Constructs the target audio format. The audio format is one channel + * signed PCM of a given sample rate. + * + * @param targetSampleRate + * The sample rate to convert to. + * @return The audio format after conversion. + */ + private TarsosDSPAudioFormat getTargetAudioFormat(int targetSampleRate) { + TarsosDSPAudioFormat audioFormat = new TarsosDSPAudioFormat(TarsosDSPAudioFormat.Encoding.PCM_SIGNED, + targetSampleRate, + 2 * 8, + 1, + 2, + targetSampleRate, + ByteOrder.BIG_ENDIAN.equals(ByteOrder.nativeOrder())); + return audioFormat; + } + + + + /** + * Set a new step size and overlap size. Both in number of samples. Watch + * out with this method: it should be called after a batch of samples is + * processed, not during. + * + * @param audioBufferSize + * The size of the buffer defines how much samples are processed + * in one step. Common values are 1024,2048. + * @param bufferOverlap + * How much consecutive buffers overlap (in samples). Half of the + * AudioBufferSize is common (512, 1024) for an FFT. + */ + public void setStepSizeAndOverlap(final int audioBufferSize, final int bufferOverlap){ + audioFloatBuffer = new float[audioBufferSize]; + floatOverlap = bufferOverlap; + floatStepSize = audioFloatBuffer.length - floatOverlap; + } + + + /** + * Adds an AudioProcessor to the chain of processors. + * + * @param audioProcessor + * The AudioProcessor to add. 
+ */ + public void addAudioProcessor(final AudioProcessor audioProcessor) { + audioProcessors.add(audioProcessor); + LOG.fine("Added an audioprocessor to the list of processors: " + audioProcessor.toString()); + } + + /** + * Removes an AudioProcessor to the chain of processors and calls processingFinished. + * + * @param audioProcessor + * The AudioProcessor to remove. + */ + public void removeAudioProcessor(final AudioProcessor audioProcessor) { + audioProcessors.remove(audioProcessor); + audioProcessor.processingFinished(); + LOG.fine("Remove an audioprocessor to the list of processors: " + audioProcessor); + } + + public void run() { + + + + //Read the first (and in some cases last) audio block. + generateNextAudioBlock(); + + + // As long as the stream has not ended + while (!stopped) { + + //Makes sure the right buffers are processed, they can be changed by audio processors. + for (final AudioProcessor processor : audioProcessors) { + if(!processor.process(audioEvent)){ + //skip to the next audio processors if false is returned. + break; + } + } + + if(!stopped){ + audioEvent.setBytesProcessed((long) samplesProcessed * format.getFrameSize()); + + // Read, convert and process consecutive overlapping buffers. + // Slide the buffer. + generateNextAudioBlock(); + } + } + + // Notify all processors that no more data is available. + // when stop() is called processingFinished is called explicitly, no need to do this again. + // The explicit call is to prevent timing issues. + if(!stopped){ + stop(); + } + } + + + /** + * Stops dispatching audio data. + */ + public void stop() { + stopped = true; + for (final AudioProcessor processor : audioProcessors) { + processor.processingFinished(); + } + } + + /** + * Reads the next audio block. It tries to read the number of bytes defined + * by the audio buffer size minus the overlap. If the expected number of + * bytes could not be read either the end of the stream is reached or + * something went wrong. + * + * The behavior for the first and last buffer is defined by their corresponding the zero pad settings. The method also handles the case if + * the first buffer is also the last. + * + */ + private void generateNextAudioBlock() { + assert floatOverlap < audioFloatBuffer.length; + + //Shift the audio information using array copy since it is probably faster than manually shifting it. + // No need to do this on the first buffer + if(audioFloatBuffer.length == floatOverlap + floatStepSize ){ + System.arraycopy(audioFloatBuffer, floatStepSize, audioFloatBuffer,0 ,floatOverlap); + } + samplesProcessed += floatStepSize; + } + + public void resetTime(){ + samplesProcessed=0; + } + + public TarsosDSPAudioFormat getFormat(){ + return format; + } + + /** + * + * @return The currently processed number of seconds. 
+ */ + public float secondsProcessed(){ + return samplesProcessed / format.getSampleRate() / format.getChannels() ; + } + +} diff --git a/app/src/main/java/be/tarsos/dsp/AudioProcessor.java b/app/src/main/java/be/tarsos/dsp/AudioProcessor.java new file mode 100644 index 0000000..29a3099 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/AudioProcessor.java @@ -0,0 +1,59 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +package be.tarsos.dsp; + +/** + *
+ * AudioProcessors are responsible for actual digital signal processing. The
+ * interface is simple: a process method that works on an AudioEvent object.
+ * The AudioEvent contains a buffer with some floats and the same information in
+ * raw bytes.
+ *
+ * AudioProcessors are meant to be chained, e.g. execute an effect and
+ * then play the sound. The chain of audio processors can be interrupted by
+ * returning false in the process methods.
+ *
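+ * A minimal illustrative sketch of a custom processor (not part of this library)
+ * that inverts the polarity of every sample and never interrupts the chain:
+ *
+ * 	AudioProcessor invertPolarity = new AudioProcessor() {
+ * 		public boolean process(AudioEvent audioEvent) {
+ * 			float[] buffer = audioEvent.getFloatBuffer();
+ * 			for (int i = 0; i < buffer.length; i++) {
+ * 				buffer[i] = -buffer[i];
+ * 			}
+ * 			return true;
+ * 		}
+ * 		public void processingFinished() {
+ * 		}
+ * 	};
+ *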
+ * @author Joren Six + */ +public interface AudioProcessor { + + /** + * Process the audio event. Do the actual signal processing on an + * (optionally) overlapping buffer. + * + * @param audioEvent + * The audio event that contains audio data. + * @return False if the chain needs to stop here, true otherwise. This can + * be used to implement e.g. a silence detector. + */ + boolean process(AudioEvent audioEvent); + + /** + * Notify the AudioProcessor that no more data is available and processing + * has finished. Can be used to deallocate resources or cleanup. + */ + void processingFinished(); +} diff --git a/app/src/main/java/be/tarsos/dsp/BitDepthProcessor.java b/app/src/main/java/be/tarsos/dsp/BitDepthProcessor.java new file mode 100644 index 0000000..ada4c62 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/BitDepthProcessor.java @@ -0,0 +1,72 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +package be.tarsos.dsp; + +/** + * Can be used to show the effect of bit depth modification in real-time. + * It simply transforms every sample to the requested bit depth. + * @author Joren Six + */ +public class BitDepthProcessor implements AudioProcessor { + + int bitDepth = 16; + + /** + * Set a new bit depth + * @param newBitDepth The new bit depth. + */ + public void setBitDepth(int newBitDepth){ + this.bitDepth = newBitDepth; + } + + /** + * The current bit depth + * @return returns the current bit depth. + */ + public int getBitDepth(){ + return this.bitDepth; + } + + + @Override + public boolean process(AudioEvent audioEvent) { + float[] buffer = audioEvent.getFloatBuffer(); + //For e.g. a bith depth of 3, the factor is + // 2^3 - 1 = 7 + float factor = (float) Math.pow(2, bitDepth)/2.0f - 1; + + for(int i = 0 ; i < buffer.length ; i++){ + //the float is scaled to the bith depth + // e.g. 
if the bit depth is 3 and the value is 0.3: + // ((int)(0.3 * 7)) / 7 = 0.28 + buffer[i]=((int) (buffer[i] * factor))/factor; + } + return true; + } + + @Override + public void processingFinished() { + + } +} diff --git a/app/src/main/java/be/tarsos/dsp/ConstantQ.java b/app/src/main/java/be/tarsos/dsp/ConstantQ.java new file mode 100644 index 0000000..5394c4d --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/ConstantQ.java @@ -0,0 +1,406 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ----------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at +* The Royal Academy of Fine Arts & Royal Conservatory, +* University College Ghent, +* Hoogpoort 64, 9000 Ghent - Belgium +* +* http://tarsos.0110.be/tag/TarsosDSP +* https://github.com/JorenSix/TarsosDSP +* http://tarsos.0110.be/releases/TarsosDSP/ +* +*/ +/* + * Copyright (c) 2006, Karl Helgason + * + * 2007/1/8 modified by p.j.leonard + * + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * 3. The name of the author may not be used to endorse or promote + * products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +package be.tarsos.dsp; + +import be.tarsos.dsp.util.fft.FFT; + +/** + * Implementation of the Constant Q Transform.
References:
+ *
+ * Judith C. Brown, Calculation of a constant Q spectral transform,
+ * J. Acoust. Soc. Am., 89(1): 425-434, 1991.
+ *
+ * Judith C. Brown and Miller S. Puckette, An efficient algorithm for the
+ * calculation of a constant Q transform, J. Acoust. Soc. Am., Vol. 92, No. 5,
+ * November 1992.
+ *
+ * Benjamin Blankertz, The Constant Q Transform.
+ *
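+ * A minimal usage sketch (illustrative only; the parameter values below are
+ * assumptions, not defaults of this class):
+ *
+ * 	ConstantQ constantQ = new ConstantQ(44100, 110, 3520, 12);
+ * 	float[] audioBuffer = new float[constantQ.getFFTlength()];
+ * 	// ... fill audioBuffer with mono samples at 44100 Hz ...
+ * 	constantQ.calculateMagintudes(audioBuffer);
+ * 	float[] magnitudes = constantQ.getMagnitudes();
+ *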
+ * + * + * @author Joren Six + * @author Karl Helgason + * @author P.J Leonard + */ +public class ConstantQ implements AudioProcessor { + + + /** + * The minimum frequency, in Hertz. The Constant-Q factors are calculated + * starting from this frequency. + */ + private final float minimumFrequency ; + + /** + * The maximum frequency in Hertz. + */ + private final float maximumFreqency; + + /** + * The length of the underlying FFT. + */ + private int fftLength; + + /** + * Lists the start of each frequency bin, in Hertz. + */ + private final float[] frequencies; + + private final float[][] qKernel; + + private final int[][] qKernel_indexes; + + /** + * The array with constant q coefficients. If you for + * example are interested in coefficients between 256 and 1024 Hz + * (2^8 and 2^10 Hz) and you requested 12 bins per octave, you + * will need 12 bins/octave * 2 octaves * 2 entries/bin = 48 + * places in the output buffer. The coefficient needs two entries + * in the output buffer since they are complex numbers. + */ + private final float[] coefficients; + + /** + * The output buffer with constant q magnitudes. If you for example are + * interested in coefficients between 256 and 1024 Hz (2^8 and 2^10 Hz) and + * you requested 12 bins per octave, you will need 12 bins/octave * 2 + * octaves = 24 places in the output buffer. + */ + private final float[] magnitudes; + + /** + * The number of bins per octave. + */ + private final int binsPerOctave; + + /** + * The underlying FFT object. + */ + private final FFT fft; + + + /** + * Create a new ConstantQ instance + * @param sampleRate The audio sample rate + * @param minFreq The minimum frequency to report in Hz + * @param maxFreq The maximum frequency to report in Hz + * @param binsPerOctave The number of bins per octave + */ + public ConstantQ(float sampleRate, float minFreq, float maxFreq,float binsPerOctave) { + this(sampleRate,minFreq,maxFreq,binsPerOctave,0.001f,1.0f); + } + + /** + * Create a new ConstantQ instance + * @param sampleRate The audio sample rate + * @param minFreq The minimum frequency to report in Hz + * @param maxFreq The maximum frequency to report in Hz + * @param binsPerOctave The number of bins per octave + * @param threshold The threshold used in kernel construction. + * @param spread the spread used to calculate the Constant Q + */ + public ConstantQ(float sampleRate, float minFreq, float maxFreq,float binsPerOctave, float threshold,float spread) { + this.minimumFrequency = minFreq; + this.maximumFreqency = maxFreq; + this.binsPerOctave = (int) binsPerOctave; + + // Calculate Constant Q + double q = 1.0 / (Math.pow(2, 1.0 / binsPerOctave) - 1.0) / spread; + + // Calculate number of output bins + int numberOfBins = (int) Math.ceil(binsPerOctave * Math.log(maximumFreqency / minimumFrequency) / Math.log(2)); + + // Initialize the coefficients array (complex number so 2 x number of bins) + coefficients = new float[numberOfBins*2]; + + // Initialize the magnitudes array + magnitudes = new float[numberOfBins]; + + + // Calculate the minimum length of the FFT to support the minimum + // frequency + float calc_fftlen = (float) Math.ceil(q * sampleRate / minimumFrequency); + + // No need to use power of 2 FFT length. 
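+ // Note: the assignment below is superseded a few lines further down, where
+ // fftLength is rounded up to the next power of two for FFT performance.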
+ fftLength = (int) calc_fftlen; + + //System.out.println(fftLength); + //The FFT length needs to be a power of two for performance reasons: + fftLength = (int) Math.pow(2, Math.ceil(Math.log(calc_fftlen) / Math.log(2))); + + // Create FFT object + fft = new FFT(fftLength); + qKernel = new float[numberOfBins][]; + qKernel_indexes = new int[numberOfBins][]; + frequencies = new float[numberOfBins]; + + // Calculate Constant Q kernels + float[] temp = new float[fftLength*2]; + float[] ctemp = new float[fftLength*2]; + int[] cindexes = new int[fftLength]; + for (int i = 0; i < numberOfBins; i++) { + float[] sKernel = temp; + // Calculate the frequency of current bin + frequencies[i] = (float) (minimumFrequency * Math.pow(2, i/binsPerOctave )); + + // Calculate length of window + int len = (int)Math.min(Math.ceil( q * sampleRate / frequencies[i]), fftLength); + + for (int j = 0; j < len; j++) { + + double window = -.5*Math.cos(2.*Math.PI*(double)j/(double)len)+.5;// Hanning Window + // double window = -.46*Math.cos(2.*Math.PI*(double)j/(double)len)+.54; // Hamming Window + + window /= len; + + // Calculate kernel + double x = 2*Math.PI * q * (double)j/(double)len; + sKernel[j*2] = (float) (window * Math.cos(x)); + sKernel[j*2+1] = (float) (window * Math.sin(x)); + } + for (int j = len*2; j < fftLength*2; j++) { + sKernel[j] = 0; + } + + // Perform FFT on kernel + fft.complexForwardTransform(sKernel); + + // Remove all zeros from kernel to improve performance + float[] cKernel = ctemp; + + int k = 0; + for (int j = 0, j2 = sKernel.length - 2; j < sKernel.length/2; j+=2,j2-=2) + { + double absval = Math.sqrt(sKernel[j]*sKernel[j] + sKernel[j+1]*sKernel[j+1]); + absval += Math.sqrt(sKernel[j2]*sKernel[j2] + sKernel[j2+1]*sKernel[j2+1]); + if(absval > threshold) + { + cindexes[k] = j; + cKernel[2*k] = sKernel[j] + sKernel[j2]; + cKernel[2*k + 1] = sKernel[j + 1] + sKernel[j2 + 1]; + k++; + } + } + + sKernel = new float[k * 2]; + int[] indexes = new int[k]; + + if (k * 2 >= 0) System.arraycopy(cKernel, 0, sKernel, 0, k * 2); + System.arraycopy(cindexes, 0, indexes, 0, k); + + // Normalize fft output + for (int j = 0; j < sKernel.length; j++) + sKernel[j] /= fftLength; + + // Perform complex conjugate on sKernel + for (int j = 1; j < sKernel.length; j += 2) + sKernel[j] = -sKernel[j]; + + for (int j = 0; j < sKernel.length; j ++) + sKernel[j] = -sKernel[j]; + + qKernel_indexes[i] = indexes; + qKernel[i] = sKernel; + } + } + + /** + * Take an input buffer with audio and calculate the constant Q + * coefficients. + * + * @param inputBuffer + * The input buffer with audio. + * + * + */ + public void calculate(float[] inputBuffer) { + fft.forwardTransform(inputBuffer); + for (int i = 0; i < qKernel.length; i++) { + float[] kernel = qKernel[i]; + int[] indexes = qKernel_indexes[i]; + float t_r = 0; + float t_i = 0; + for (int j = 0, l = 0; j < kernel.length; j += 2, l++) { + int jj = indexes[l]; + float b_r = inputBuffer[jj]; + float b_i = inputBuffer[jj + 1]; + float k_r = kernel[j]; + float k_i = kernel[j + 1]; + // COMPLEX: T += B * K + t_r += b_r * k_r - b_i * k_i; + t_i += b_r * k_i + b_i * k_r; + } + coefficients[i * 2] = t_r; + coefficients[i * 2 + 1] = t_i; + } + } + + /** + * Take an input buffer with audio and calculate the constant Q magnitudes. + * @param inputBuffer The input buffer with audio. 
+ */ + public void calculateMagintudes(float[] inputBuffer) { + calculate(inputBuffer); + for(int i = 0 ; i < magnitudes.length ; i++){ + magnitudes[i] = (float) Math.sqrt(coefficients[i*2] * coefficients[i*2] + coefficients[i*2+1] * coefficients[i*2+1]); + } + } + + + @Override + public boolean process(AudioEvent audioEvent) { + float[] audioBuffer = audioEvent.getFloatBuffer().clone(); + if(audioBuffer.length != getFFTlength()){ + throw new IllegalArgumentException(String.format("The length of the fft (%d) should be the same as the length of the audio buffer (%d)",getFFTlength(),audioBuffer.length)); + } + calculateMagintudes(audioBuffer); + return true; + } + + @Override + public void processingFinished() { + // Do nothing. + } + + //----GETTERS + + /** + * @return The list of starting frequencies for each band. In Hertz. + */ + public float[] getFreqencies() { + return frequencies; + } + + /** + * Returns the Constant Q magnitudes calculated for the previous audio + * buffer. Beware: the array is reused for performance reasons. If your need + * to cache your results, please copy the array. + * @return The output buffer with constant q magnitudes. If you for example are + * interested in coefficients between 256 and 1024 Hz (2^8 and 2^10 Hz) and + * you requested 12 bins per octave, you will need 12 bins/octave * 2 + * octaves = 24 places in the output buffer. + */ + public float[] getMagnitudes() { + return magnitudes; + } + + + /** + * Return the Constant Q coefficients calculated for the previous audio + * buffer. Beware: the array is reused for performance reasons. If your need + * to cache your results, please copy the array. + * + * @return The array with constant q coefficients. If you for example are + * interested in coefficients between 256 and 1024 Hz (2^8 and 2^10 + * Hz) and you requested 12 bins per octave, you will need 12 + * bins/octave * 2 octaves * 2 entries/bin = 48 places in the output + * buffer. The coefficient needs two entries in the output buffer + * since they are complex numbers. + */ + public float[] getCoefficients() { + return coefficients; + } + + /** + * @return The number of coefficients, output bands. + */ + public int getNumberOfOutputBands() { + return frequencies.length; + } + + /** + * @return The required length the FFT. + */ + public int getFFTlength() { + return fftLength; + } + + /** + * @return the number of bins every octave. + */ + public int getBinsPerOctave(){ + return binsPerOctave; + } +} diff --git a/app/src/main/java/be/tarsos/dsp/DetermineDurationProcessor.java b/app/src/main/java/be/tarsos/dsp/DetermineDurationProcessor.java new file mode 100644 index 0000000..d4c9fc9 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/DetermineDurationProcessor.java @@ -0,0 +1,51 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. 
+* +*/ + +package be.tarsos.dsp; + +public class DetermineDurationProcessor implements AudioProcessor { + + long durationInSamples; + float sampleRate; + AudioEvent lastEvent; + + @Override + public boolean process(AudioEvent audioEvent) { + lastEvent = audioEvent; + return true; + } + + public double getDurationInSeconds(){ + return durationInSamples/sampleRate; + } + + public double getDurationInSamples(){ + return durationInSamples; + } + + @Override + public void processingFinished() { + sampleRate = lastEvent.getSampleRate(); + durationInSamples = lastEvent.getSamplesProcessed() + lastEvent.getFloatBuffer().length; + } +} diff --git a/app/src/main/java/be/tarsos/dsp/EnvelopeFollower.java b/app/src/main/java/be/tarsos/dsp/EnvelopeFollower.java new file mode 100644 index 0000000..53e5267 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/EnvelopeFollower.java @@ -0,0 +1,130 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +package be.tarsos.dsp; + +/** + * An envelope follower follows the envelope of a signal. Sometimes the name + * envelope detector is used. From wikipedia: + *
An envelope detector
+ * is an electronic circuit that takes a high-frequency signal as input and
+ * provides an output which is the envelope of the original signal. The
+ * capacitor in the circuit stores up charge on the rising edge, and releases it
+ * slowly through the resistor when the signal falls. The diode in series
+ * rectifies the incoming signal, allowing current flow only when the positive
+ * input terminal is at a higher potential than the negative input terminal.
+ *
+ * The resulting envelope is stored in the buffer of the processed AudioEvent.
+ * The class can be used as follows:
+ *
+ * EnvelopeFollower follower = new EnvelopeFollower(44100);
+ * 		
+ * AudioDispatcher dispatcher = AudioDispatcher.fromFloatArray(sine, 44100, 1024, 0);
+ * 	
+ * 	
+ * 	dispatcher.addAudioProcessor(follower);
+ * 	dispatcher.addAudioProcessor(new AudioProcessor() {
+ * 	
+ * 		public boolean process(AudioEvent audioEvent) {
+ * 			//envelope
+ * 			float buffer[] = audioEvent.getFloatBuffer();
+ * 			for(int i = 0 ; i < buffer.length ; i++){
+ * 				System.out.println(buffer[i]);
+ * 			}
+ * 			return true;
+ * 		}
+ * 			
+ * 		public void processingFinished() {
+ *  	}
+ * 	});
+ * 	dispatcher.run();
+ *  
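+ *
+ * The attack and release times are turned into smoothing coefficients as
+ * exp(-1 / (sampleRate * time)). As a worked example (not from the original
+ * documentation): the default attack time of 0.0002 s at 44100 Hz gives a
+ * coefficient of exp(-1 / 8.82), roughly 0.89.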
+ * + * + * @author Joren Six + * + */ +public class EnvelopeFollower implements AudioProcessor { + + /** + * Defines how fast the envelope raises, defined in seconds. + */ + private static final double DEFAULT_ATTACK_TIME = 0.0002;//in seconds + /** + * Defines how fast the envelope goes down, defined in seconds. + */ + private static final double DEFAULT_RELEASE_TIME = 0.0004;//in seconds + + float gainAttack ; + float gainRelease; + float envelopeOut = 0.0f; + + /** + * Create a new envelope follower, with a certain sample rate. + * @param sampleRate The sample rate of the audio signal. + */ + public EnvelopeFollower(double sampleRate){ + this(sampleRate,DEFAULT_ATTACK_TIME,DEFAULT_RELEASE_TIME); + } + + /** + * Create a new envelope follower, with a certain sample rate. + * @param sampleRate The sample rate of the audio signal. + * @param attackTime Defines how fast the envelope raises, defined in seconds. + * @param releaseTime Defines how fast the envelope goes down, defined in seconds. + */ + public EnvelopeFollower(double sampleRate, double attackTime,double releaseTime){ + gainAttack = (float) Math.exp(-1.0/(sampleRate*attackTime)); + gainRelease = (float) Math.exp(-1.0/(sampleRate*releaseTime)); + } + + @Override + public boolean process(AudioEvent audioEvent) { + float[] buffer = audioEvent.getFloatBuffer(); + calculateEnvelope(buffer); + return true; + } + + /** + * Determine the envelope of an audio buffer + * @param buffer The audio buffer. + */ + public void calculateEnvelope(float[] buffer){ + for(int i = 0 ; i < buffer.length ; i++){ + float envelopeIn = Math.abs(buffer[i]); + if(envelopeOut < envelopeIn){ + envelopeOut = envelopeIn + gainAttack * (envelopeOut - envelopeIn); + } else { + envelopeOut = envelopeIn + gainRelease * (envelopeOut - envelopeIn); + } + buffer[i] = envelopeOut; + } + } + + @Override + public void processingFinished() { + + } +} diff --git a/app/src/main/java/be/tarsos/dsp/FadeIn.java b/app/src/main/java/be/tarsos/dsp/FadeIn.java new file mode 100644 index 0000000..1088beb --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/FadeIn.java @@ -0,0 +1,54 @@ +package be.tarsos.dsp; + +public class FadeIn implements AudioProcessor { + + private final double duration; + private double firstTime=-1; + private double time; + private final GainProcessor gp=new GainProcessor(0.1); + private boolean fadingIn=true; + + /** + * A new fade in processor + * @param d duration of the fade in seconds + */ + public FadeIn(double d) // + { + this.duration=d; + } + + /** + * Stop fade in processing immediately + */ + public void stopFadeIn() + { + this.fadingIn=false; + } + + @Override + public boolean process(AudioEvent audioEvent) + { + // Don't do anything after the end of the Fade In + if(fadingIn) + { + if(firstTime==-1) + firstTime=audioEvent.getTimeStamp(); + + + // Increase the gain according to time since the beginning of the Fade In + time=audioEvent.getTimeStamp()-firstTime; + gp.setGain(time/duration); + gp.process(audioEvent); + if(time > duration){ + fadingIn = false; + } + } + return true; + } + + @Override + public void processingFinished() + { + gp.processingFinished(); + } +} diff --git a/app/src/main/java/be/tarsos/dsp/FadeOut.java b/app/src/main/java/be/tarsos/dsp/FadeOut.java new file mode 100644 index 0000000..954f977 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/FadeOut.java @@ -0,0 +1,50 @@ +package be.tarsos.dsp; + +public class FadeOut implements AudioProcessor { + + private final double duration; + private double firstTime=-1; + private 
double time; + private boolean isFadeOut=false; + private final GainProcessor gp=new GainProcessor(0.9); + + /** + * A new fade out processor + * @param d duration of the fade out in seconds + */ + public FadeOut(double d) // d= + { + this.duration=d; + } + + /** + * Start fade out processing now + */ + public void startFadeOut() + { + this.isFadeOut=true; + } + + @Override + public boolean process(AudioEvent audioEvent) + { + // Don't do anything before the beginning of Fade Out + if(isFadeOut) + { + if(firstTime==-1) + firstTime=audioEvent.getTimeStamp(); + + // Decrease the gain according to time since the beginning of the Fade Out + time=audioEvent.getTimeStamp()-firstTime; + gp.setGain(1-time/duration); + gp.process(audioEvent); + } + return true; + } + + @Override + public void processingFinished() + { + gp.processingFinished(); + } +} \ No newline at end of file diff --git a/app/src/main/java/be/tarsos/dsp/GainProcessor.java b/app/src/main/java/be/tarsos/dsp/GainProcessor.java new file mode 100644 index 0000000..0e62418 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/GainProcessor.java @@ -0,0 +1,74 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +package be.tarsos.dsp; + +/** + * With the gain processor it is possible to adapt the volume of the sound. With + * a gain of 1, nothing happens. A gain greater than one is a volume increase a + * gain between zero and one, exclusive, is a decrease. If you need to flip the + * sign of the audio samples, you can by providing a gain of -1.0. but I have no + * idea what you could gain by doing that (pathetic pun, I know). + * + * @author Joren Six + */ +public class GainProcessor implements AudioProcessor { + private double gain; + + /** + * Create a new gain processor + * @param newGain the gain + */ + public GainProcessor(double newGain) { + setGain(newGain); + } + + /** + * Set the gain applied to the next buffer. + * @param newGain The new gain. 
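+ *            (a linear amplitude factor, not decibels; to derive it from a level in dB
+ *            one can use gain = Math.pow(10, dB / 20.0), e.g. -6 dB is roughly a gain of 0.5)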
+ */ + public void setGain(double newGain) { + this.gain = newGain; + } + + @Override + public boolean process(AudioEvent audioEvent) { + float[] audioFloatBuffer = audioEvent.getFloatBuffer(); + for (int i = audioEvent.getOverlap(); i < audioFloatBuffer.length ; i++) { + float newValue = (float) (audioFloatBuffer[i] * gain); + if(newValue > 1.0f) { + newValue = 1.0f; + } else if(newValue < -1.0f) { + newValue = -1.0f; + } + audioFloatBuffer[i] = newValue; + } + return true; + } + + @Override + public void processingFinished() { + // NOOP + } +} diff --git a/app/src/main/java/be/tarsos/dsp/MultichannelToMono.java b/app/src/main/java/be/tarsos/dsp/MultichannelToMono.java new file mode 100644 index 0000000..b24a2dc --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/MultichannelToMono.java @@ -0,0 +1,71 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +package be.tarsos.dsp; + +/** + * Process multi channels audio to mono + */ +public class MultichannelToMono implements AudioProcessor{ + + private final int channels; + private final boolean mean; + + public MultichannelToMono(int numberOfChannels,boolean meanOfchannels){ + channels = numberOfChannels; + mean = meanOfchannels; + } + + @Override + public boolean process(AudioEvent audioEvent) { + float[] buffer = audioEvent.getFloatBuffer(); + float[] newBuffer = new float[buffer.length/channels]; + + if(mean){ + if(channels==2){ + for(int i = 0 ; i < buffer.length ; i = i + channels){ + newBuffer[i/channels]=(buffer[i]+buffer[i+1])/2.0f; + } + }else{ + for(int i = 0 ; i < buffer.length ; i = i + channels){ + double sum = 0; + for(int j = 0; j < channels;j++){ + sum = sum + buffer[i+j]; + } + newBuffer[i/channels]=(float) (sum/channels); + } + } + }else{ + for(int i = 0 ; i < buffer.length ; i = i + channels){ + newBuffer[i/channels]=buffer[i]; + } + } + + audioEvent.setFloatBuffer(newBuffer); + return true; + } + + @Override + public void processingFinished() { + } +} diff --git a/app/src/main/java/be/tarsos/dsp/Oscilloscope.java b/app/src/main/java/be/tarsos/dsp/Oscilloscope.java new file mode 100644 index 0000000..975d9fc --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/Oscilloscope.java @@ -0,0 +1,96 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, 
see README. +* +*/ + + +package be.tarsos.dsp; + +/** + * The oscilloscope generates a float array with + * array[i] an x coordinate in percentage + * array[i+1] the value of the amplitude in audio buffer + * array[i+2] another x coordinate in percentage + * array[i+3] the next amplitude in the audio buffer + * + * The implementation is based on the one by Dan Ellis found at http://www.ee.columbia.edu/~dpwe/resources/Processing/ + * @author Dan Ellis + * @author Joren Six + * + */ +public class Oscilloscope implements AudioProcessor { + public interface OscilloscopeEventHandler{ + /** + * @param data The data contains a float array with: + * array[i] an x coordinate in percentage + * array[i+1] the value of the amplitude in audio buffer + * array[i+2] another x coordinate in percentage + * array[i+3] the next amplitude in the audio buffer + * @param event An audio Event. + */ + void handleEvent(float[] data, AudioEvent event); + } + float[] dataBuffer; + private final OscilloscopeEventHandler handler; + public Oscilloscope(OscilloscopeEventHandler handler){ + this.handler = handler; + } + + @Override + public boolean process(AudioEvent audioEvent) { + float[] audioBuffer = audioEvent.getFloatBuffer(); + int offset = 0; + float maxdx = 0; + for (int i = 0; i < audioBuffer.length / 4; ++i) { + float dx = audioBuffer[i + 1] - audioBuffer[i]; + if (dx > maxdx) { + offset = i; + maxdx = dx; + } + } + + float tbase = audioBuffer.length / 2; + + + int length = Math.min((int) tbase, audioBuffer.length-offset); + if(dataBuffer == null || dataBuffer.length != length * 4){ + dataBuffer = new float[length * 4]; + } + + int j = 0; + for(int i = 0; i < length - 1; i++){ + float x1 = i / tbase; + float x2 = i / tbase; + dataBuffer[j] = x1; + dataBuffer[j+1] = audioBuffer[i+offset]; + dataBuffer[j+2] = x2; + dataBuffer[j+3] = audioBuffer[i+1+offset]; + j = j + 4; + } + handler.handleEvent(dataBuffer, audioEvent); + return true; + } + + @Override + public void processingFinished() { + } + +} diff --git a/app/src/main/java/be/tarsos/dsp/PitchShifter.java b/app/src/main/java/be/tarsos/dsp/PitchShifter.java new file mode 100644 index 0000000..d415f25 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/PitchShifter.java @@ -0,0 +1,182 @@ +package be.tarsos.dsp; + + +import be.tarsos.dsp.util.fft.FFT; + +/** + * This is a translation of code by Stephan M. Bernsee. See the following explanation on this code: + * Pitch shifting using the STFT. + * + * @author Joren Six + * @author Stephan M. 
Bernsee + */ +public class PitchShifter implements AudioProcessor{ + + private final FFT fft; + private final int size; + private final float[] currentMagnitudes; + private final float[] currentPhase; + private final float[] currentFrequencies; + private final float[] outputAccumulator; + private final float[] summedPhase; + + private final float[] previousPhase; + + private double pitchShiftRatio = 0; + + private final double sampleRate; + + private final long osamp; + + private final double excpt; + + public PitchShifter(double factor, double sampleRate, int size, int overlap){ + + + pitchShiftRatio = factor; + this.size = size; + this.sampleRate = sampleRate; + //this.d = d; + + osamp=size/(size-overlap); + + this.excpt = 2.*Math.PI*(double)(size-overlap)/(double)size; + + fft = new FFT(size); + + currentMagnitudes = new float[size/2]; + currentFrequencies = new float[size/2]; + currentPhase = new float[size/2]; + + previousPhase = new float[size/2]; + summedPhase = new float[size/2]; + outputAccumulator = new float[size*2]; + } + + public void setPitchShiftFactor(float newPitchShiftFactor){ + this.pitchShiftRatio = newPitchShiftFactor; + } + + @Override + public boolean process(AudioEvent audioEvent) { + //see http://downloads.dspdimension.com/smbPitchShift.cpp + + /* ***************** ANALYSIS ******************* */ + float[] fftData = audioEvent.getFloatBuffer().clone(); + + for(int i = 0 ; i= 0) + qpd += qpd&1; + else + qpd -= qpd&1; + tmp -= Math.PI*(double)qpd; + + /* get deviation from bin frequency from the +/- Pi interval */ + tmp = osamp*tmp/(2.*Math.PI); + + /* compute the k-th partials' true frequency */ + tmp = (double)i*freqPerBin + tmp*freqPerBin; + + /* store magnitude and true frequency in analysis arrays */ + currentFrequencies[i] = (float) tmp; + } + + /* ***************** PROCESSING ******************* */ + /* this does the actual pitch shifting */ + float[] newMagnitudes = new float[size/2]; + float[] newFrequencies = new float[size/2]; + + for(int i = 0 ; i < size/2 ; i++){ + int index = (int)(i * pitchShiftRatio); + if(index < size/2){ + newMagnitudes[index] += currentMagnitudes[i]; + newFrequencies[index] = (float) (currentFrequencies[i]*pitchShiftRatio); + } + } + + ///Synthesis**** + float[] newFFTData = new float[size]; + + for(int i =0 ; i < size/2 ; i++){ + + float magn = newMagnitudes[i]; + double tmp = newFrequencies[i]; + + /* subtract bin mid frequency */ + tmp -= (double)i*freqPerBin; + + /* get bin deviation from freq deviation */ + tmp /= freqPerBin; + + /* take osamp into account */ + tmp = 2.*Math.PI*tmp/osamp; + + /* add the overlap phase advance back in */ + tmp += (double)i*excpt; + + /* accumulate delta phase to get bin phase */ + summedPhase[i] += tmp; + float phase = summedPhase[i]; + + /* get real and imag part and re-interleave */ + newFFTData[2*i] = (float) (magn * Math.cos(phase)); + newFFTData[2*i+1] = (float) (magn* Math.sin(phase)); + } + + /* zero negative frequencies */ + for (int i = size/2+2; i < size; i++){ + newFFTData[i] = 0.f; + } + + fft.backwardsTransform(newFFTData); + for(int i = 0 ; i < newFFTData.length ; i ++){ + float window = (float) (-.5*Math.cos(2.*Math.PI*(double)i/(double)size)+.5); + //outputAccumulator[i] += 2000*window*newFFTData[i]/(float) (size*osamp); + outputAccumulator[i] += window*newFFTData[i]/(float) osamp; + if(outputAccumulator[i] > 1.0 || outputAccumulator[i] < -1.0 ){ + System.err.println("Clipping!"); + } + } + + int stepSize = (int) (size/osamp); + + + + //Arrays.fill(audioBuffer, 0); + 
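+ // Overlap-add bookkeeping: shift the accumulator down by one hop (stepSize
+ // samples) so the oldest, fully summed samples sit at the start of the buffer.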
System.arraycopy(outputAccumulator, stepSize, outputAccumulator, 0, size); + + float[] audioBuffer = new float[audioEvent.getFloatBuffer().length]; + audioEvent.setFloatBuffer(audioBuffer); + System.arraycopy(outputAccumulator, 0, audioBuffer,size-stepSize, stepSize); + + return true; + } + + @Override + public void processingFinished() { + + } +} diff --git a/app/src/main/java/be/tarsos/dsp/SilenceDetector.java b/app/src/main/java/be/tarsos/dsp/SilenceDetector.java new file mode 100644 index 0000000..bc47744 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/SilenceDetector.java @@ -0,0 +1,142 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +package be.tarsos.dsp; + + +/** + * The continuing silence detector does not break the audio processing pipeline when silence is detected. + */ +public class SilenceDetector implements AudioProcessor { + + public static final double DEFAULT_SILENCE_THRESHOLD = -70.0;//db + + private final double threshold;//db + + private final boolean breakProcessingQueueOnSilence; + + /** + * Create a new silence detector with a default threshold. + */ + public SilenceDetector(){ + this(DEFAULT_SILENCE_THRESHOLD,false); + } + + /** + * Create a new silence detector with a defined threshold. + * + * @param silenceThreshold + * The threshold which defines when a buffer is silent (in dB). + * Normal values are [-70.0,-30.0] dB SPL. + * @param breakProcessingQueueOnSilence + */ + public SilenceDetector(final double silenceThreshold,boolean breakProcessingQueueOnSilence){ + this.threshold = silenceThreshold; + this.breakProcessingQueueOnSilence = breakProcessingQueueOnSilence; + } + + /** + * Calculates and returns the root mean square of the signal. Please + * cache the result since it is calculated every time. + * @param floatBuffer The audio buffer to calculate the RMS for. + * @return The RMS of + * the signal present in the current buffer. + */ + public static double calculateRMS(float[] floatBuffer){ + double rms = 0.0; + for (int i = 0; i < floatBuffer.length; i++) { + rms += floatBuffer[i] * floatBuffer[i]; + } + rms = rms / Double.valueOf(floatBuffer.length); + rms = Math.sqrt(rms); + return rms; + } + + /** + * Returns the dBSPL for a buffer. + * + * @param buffer + * The buffer with audio information. + * @return The dBSPL level for the buffer. + */ + private static double soundPressureLevel(final float[] buffer) { + double rms = calculateRMS(buffer); + return linearToDecibel(rms); + } + + /** + * Converts a linear to a dB value. + * + * @param value + * The value to convert. + * @return The converted value. + */ + private static double linearToDecibel(final double value) { + return 20.0 * Math.log10(value); + } + + double currentSPL = 0; + public double currentSPL(){ + return currentSPL; + } + + /** + * Checks if the dBSPL level in the buffer falls below a certain threshold. 
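+ * The level is computed as 20 * log10(RMS) of the buffer, so the default
+ * threshold of -70 dB corresponds to an RMS of roughly 3.2e-4 for samples in
+ * the [-1, 1] float range.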
+ * + * @param buffer + * The buffer with audio information. + * @param silenceThreshold + * The threshold in dBSPL + * @return True if the audio information in buffer corresponds with silence, + * false otherwise. + */ + public boolean isSilence(final float[] buffer, final double silenceThreshold) { + currentSPL = soundPressureLevel(buffer); + return currentSPL < silenceThreshold; + } + + public boolean isSilence(final float[] buffer) { + return isSilence(buffer, threshold); + } + + + @Override + public boolean process(AudioEvent audioEvent) { + boolean isSilence = isSilence(audioEvent.getFloatBuffer()); + //break processing chain on silence? + if(breakProcessingQueueOnSilence){ + //break if silent + return !isSilence; + }else{ + //never break the chain + return true; + } + } + + + @Override + public void processingFinished() { + } +} diff --git a/app/src/main/java/be/tarsos/dsp/SpectralPeakProcessor.java b/app/src/main/java/be/tarsos/dsp/SpectralPeakProcessor.java new file mode 100644 index 0000000..43967ea --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/SpectralPeakProcessor.java @@ -0,0 +1,478 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +package be.tarsos.dsp; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import be.tarsos.dsp.util.PitchConverter; +import be.tarsos.dsp.util.fft.FFT; +import be.tarsos.dsp.util.fft.HammingWindow; + +/** + *

+ * This class implements a spectral peak follower as described in Sethares et + * al. 2009 - Spectral Tools for Dynamic Tonality and Audio Morphing - section + * "Analysis-Resynthesis". It calculates a noise floor and picks spectral peaks + * that rise above the calculated noise floor by a certain factor. The noise floor + * is determined using a simple median filter. + *

+ *

+ * Parts of the code are modified from the code accompanying + * "Spectral Tools for Dynamic Tonality and Audio Morphing". + *

+ *

+ * To get the spectral peaks from an audio frame, call getPeakList + * +AudioDispatcher dispatcher = new AudioDispatcher(stream, fftsize, overlap); +dispatcher.addAudioProcessor(spectralPeakFollower); +dispatcher.addAudioProcessor(new AudioProcessor() { + + public void processingFinished() { + } + + public boolean process(AudioEvent audioEvent) { + float[] noiseFloor = SpectralPeakProcessor.calculateNoiseFloor(spectralPeakFollower.getMagnitudes(), medianFilterLength, noiseFloorFactor); + List localMaxima = SpectralPeakProcessor.findLocalMaxima(spectralPeakFollower.getMagnitudes(), noiseFloor); + List list = SpectralPeakProcessor.findPeaks(spectralPeakFollower.getMagnitudes(), spectralPeakFollower.getFrequencyEstimates(), localMaxima, numberOfPeaks); + // do something with the list... + return true; + } +}); +dispatcher.run(); + + * + * @author Joren Six + * @author William A. Sethares + * @author Andrew J. Milne + * @author Stefan Tiedje + * @author Anthony Prechtl + * @author James Plamondon + * + */ +public class SpectralPeakProcessor implements AudioProcessor { + + /** + * The sample rate of the signal. + */ + private final int sampleRate; + + /** + * Cached calculations for the frequency calculation + */ + private final double dt; + private final double cbin; + private final double inv_2pi; + private final double inv_deltat; + private final double inv_2pideltat; + + /** + * The fft object used to calculate phase and magnitudes. + */ + private final FFT fft; + + /** + * The pahse info of the current frame. + */ + private final float[] currentPhaseOffsets; + + /** + * The magnitudes in the current frame. + */ + private final float[] magnitudes; + + /** + * Detailed frequency estimates for each bin, using phase info + */ + private final float[] frequencyEstimates; + + /** + * The phase information of the previous frame, or null. + */ + private float[] previousPhaseOffsets; + + + + public SpectralPeakProcessor(int bufferSize, int overlap, int sampleRate) { + fft = new FFT(bufferSize, new HammingWindow()); + + magnitudes = new float[bufferSize / 2]; + currentPhaseOffsets = new float[bufferSize / 2]; + frequencyEstimates = new float[bufferSize / 2]; + + dt = (bufferSize - overlap) / (double) sampleRate; + cbin = (double) (dt * sampleRate / (double) bufferSize); + + inv_2pi = (double) (1.0 / (2.0 * Math.PI)); + inv_deltat = (double) (1.0 / dt); + inv_2pideltat = (double) (inv_deltat * inv_2pi); + + this.sampleRate = sampleRate; + + } + + private void calculateFFT(float[] audio) { + // Clone to prevent overwriting audio data + float[] fftData = audio.clone(); + // Extract the power and phase data + fft.powerPhaseFFT(fftData, magnitudes, currentPhaseOffsets); + } + + private void normalizeMagintudes(){ + float maxMagnitude = (float) -1e6; + for(int i = 0;i= 0 && j < magnitudes.length){ + noiseFloorBuffer[index] = magnitudes[j]; + } else{ + noiseFloorBuffer[index] = median; + } + index++; + } + // calculate the noise floor value. + noisefloor[i] = (float) (median(noiseFloorBuffer) * (noiseFloorFactor)) ; + } + + float rampLength = 12.0f; + for(int i = 0 ; i <= rampLength ; i++){ + //ramp + float ramp = 1.0f; + ramp = (float) (-1 * (Math.log(i/rampLength))) + 1.0f; + noisefloor[i] = ramp * noisefloor[i]; + } + + return noisefloor; + } + + /** + * Finds the local magintude maxima and stores them in the given list. + * @param magnitudes The magnitudes. + * @param noisefloor The noise floor. + * @return a list of local maxima. 
+ */ + public static List findLocalMaxima(float[] magnitudes,float[] noisefloor){ + List localMaximaIndexes = new ArrayList(); + for (int i = 1; i < magnitudes.length - 1; i++) { + boolean largerThanPrevious = (magnitudes[i - 1] < magnitudes[i]); + boolean largerThanNext = (magnitudes[i] > magnitudes[i + 1]); + boolean largerThanNoiseFloor = (magnitudes[i] > noisefloor[i]); + if (largerThanPrevious && largerThanNext && largerThanNoiseFloor) { + localMaximaIndexes.add(i); + } + } + return localMaximaIndexes; + } + + /** + * @param magnitudes the magnitudes. + * @return the index for the maximum magnitude. + */ + private static int findMaxMagnitudeIndex(float[] magnitudes){ + int maxMagnitudeIndex = 0; + float maxMagnitude = (float) -1e6; + for (int i = 1; i < magnitudes.length - 1; i++) { + if(magnitudes[i] > maxMagnitude){ + maxMagnitude = magnitudes[i]; + maxMagnitudeIndex = i; + } + } + return maxMagnitudeIndex; + } + + /** + * + * @param magnitudes the magnitudes.. + * @param frequencyEstimates The frequency estimates for each bin. + * @param localMaximaIndexes The indexes of the local maxima. + * @param numberOfPeaks The requested number of peaks. + * @param minDistanceInCents The minimum distance in cents between the peaks + * @return A list with spectral peaks. + */ + public static List findPeaks(float[] magnitudes, float[] frequencyEstimates, List localMaximaIndexes, int numberOfPeaks, int minDistanceInCents){ + int maxMagnitudeIndex = findMaxMagnitudeIndex(magnitudes); + List spectralPeakList = new ArrayList(); + + if(localMaximaIndexes.size()==0) + return spectralPeakList; + + float referenceFrequency=0; + //the frequency of the bin with the highest magnitude + referenceFrequency = frequencyEstimates[maxMagnitudeIndex]; + + //remove frequency estimates below zero + for(int i = 0 ; i < localMaximaIndexes.size() ; i++){ + if(frequencyEstimates[localMaximaIndexes.get(i)] < 0 ){ + localMaximaIndexes.remove(i); + frequencyEstimates[localMaximaIndexes.get(i)]=1;//Hz + i--; + } + } + + //filter the local maxima indexes, remove peaks that are too close to each other + //assumes that localmaximaIndexes is sorted from lowest to higest index + for(int i = 1 ; i < localMaximaIndexes.size() ; i++){ + double centCurrent = PitchConverter.hertzToAbsoluteCent(frequencyEstimates[localMaximaIndexes.get(i)]); + double centPrev = PitchConverter.hertzToAbsoluteCent(frequencyEstimates[localMaximaIndexes.get(i-1)]); + double centDelta = centCurrent - centPrev; + if(centDelta < minDistanceInCents ){ + if(magnitudes[localMaximaIndexes.get(i)] > magnitudes[localMaximaIndexes.get(i-1)]){ + localMaximaIndexes.remove(i-1); + }else{ + localMaximaIndexes.remove(i); + } + i--; + } + } + + // Retrieve the maximum values for the indexes + float[] maxMagnitudes = new float[localMaximaIndexes.size()]; + for(int i = 0 ; i < localMaximaIndexes.size() ; i++){ + maxMagnitudes[i] = magnitudes[localMaximaIndexes.get(i)]; + } + // Sort the magnitudes in ascending order + Arrays.sort(maxMagnitudes); + + // Find the threshold, the first value or somewhere in the array. 
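+ // When more local maxima remain than requested peaks, the threshold becomes the
+ // numberOfPeaks-th largest magnitude, so only the strongest candidates survive.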
+ float peakthresh = maxMagnitudes[0]; + if (maxMagnitudes.length > numberOfPeaks) { + peakthresh = maxMagnitudes[maxMagnitudes.length - numberOfPeaks]; + } + + //store the peaks + for(Integer i : localMaximaIndexes){ + if(magnitudes[i]>= peakthresh){ + final float frequencyInHertz= frequencyEstimates[i]; + //ignore frequencies lower than 30Hz + float binMagnitude = magnitudes[i]; + SpectralPeak peak = new SpectralPeak(0,frequencyInHertz, binMagnitude, referenceFrequency,i); + spectralPeakList.add(peak); + } + } + return spectralPeakList; + } + + public static final float median(double[] arr){ + return percentile(arr, 0.5); + } + + /** + * Returns the p-th percentile of values in an array. You can use this + * function to establish a threshold of acceptance. For example, you can + * decide to examine candidates who score above the 90th percentile (0.9). + * The elements of the input array are modified (sorted) by this method. + * + * @param arr An array of sample data values that define relative standing. + * The contents of the input array are sorted by this method. + * @param p The percentile value in the range 0..1, inclusive. + * @return The p-th percentile of values in an array. If p is not a multiple + * of 1/(n - 1), this method interpolates to determine the value at + * the p-th percentile. + **/ + public static final float percentile( double[] arr, double p ) { + + if (p < 0 || p > 1) + throw new IllegalArgumentException("Percentile out of range."); + + // Sort the array in ascending order. + Arrays.sort(arr); + + // Calculate the percentile. + double t = p*(arr.length - 1); + int i = (int)t; + + return (float) ((i + 1 - t)*arr[i] + (t - i)*arr[i + 1]); + } + + public static double median(float[] m) { +// Sort the array in ascending order. 
+ Arrays.sort(m); + int middle = m.length/2; + if (m.length%2 == 1) { + return m[middle]; + } else { + return (m[middle-1] + m[middle]) / 2.0; + } + } + + + public static class SpectralPeak{ + private final float frequencyInHertz; + private final float magnitude; + private final float referenceFrequency; + private final int bin; + /** + * Timestamp in fractional seconds + */ + private final float timeStamp; + + public SpectralPeak(float timeStamp,float frequencyInHertz, float magnitude,float referenceFrequency,int bin){ + this.frequencyInHertz = frequencyInHertz; + this.magnitude = magnitude; + this.referenceFrequency = referenceFrequency; + this.timeStamp = timeStamp; + this.bin = bin; + } + + public float getRelativeFrequencyInCents(){ + if(referenceFrequency > 0 && frequencyInHertz > 0){ + float refInCents = (float) PitchConverter.hertzToAbsoluteCent(referenceFrequency); + float valueInCents = (float) PitchConverter.hertzToAbsoluteCent(frequencyInHertz); + return valueInCents - refInCents; + }else{ + return 0; + } + } + + public float getTimeStamp(){ + return timeStamp; + } + + public float getMagnitude(){ + return magnitude; + } + + public float getFrequencyInHertz(){ + return frequencyInHertz; + } + + public float getRefFrequencyInHertz(){ + return referenceFrequency; + } + + public String toString(){ + return String.format("%.2f %.2f %.2f", frequencyInHertz,getRelativeFrequencyInCents(),magnitude); + } + + public int getBin() { + return bin; + } + } + + +} diff --git a/app/src/main/java/be/tarsos/dsp/StopAudioProcessor.java b/app/src/main/java/be/tarsos/dsp/StopAudioProcessor.java new file mode 100644 index 0000000..93bfde0 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/StopAudioProcessor.java @@ -0,0 +1,56 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +package be.tarsos.dsp; + +/** + * Simply stops the audio processing + * pipeline if the stop time is reached. 
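+ * It does so by returning false from process() once the time stamp of the
+ * current AudioEvent passes stopTime, which keeps the remaining processors in
+ * the chain from running.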
+ * @author Joren Six + * + */ +public class StopAudioProcessor implements AudioProcessor { + + private double stopTime; + public StopAudioProcessor(double stopTime){ + this.stopTime = stopTime; + } + + @Override + public boolean process(AudioEvent audioEvent) { + return audioEvent.getTimeStamp() <= stopTime; + } + + @Override + public void processingFinished() { + + } + + public double getStopAt() { + return stopTime; + } + + public void setStopTime(double stopTime) { + this.stopTime = stopTime; + } +} diff --git a/app/src/main/java/be/tarsos/dsp/WaveformSimilarityBasedOverlapAdd.java b/app/src/main/java/be/tarsos/dsp/WaveformSimilarityBasedOverlapAdd.java new file mode 100644 index 0000000..42d01bf --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/WaveformSimilarityBasedOverlapAdd.java @@ -0,0 +1,403 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +package be.tarsos.dsp; + + +/** + * + *

+ * An overlap-add technique based on waveform similarity (WSOLA) for high + * quality time-scale modification of speech + *

+ *

+ * A concept of waveform similarity for tackling the problem of time-scale + * modification of speech is proposed. It is worked out in the context of + * short-time Fourier transform representations. The resulting WSOLA + * (waveform-similarity-based synchronized overlap-add) algorithm produces + * high-quality speech output, is algorithmically and computationally efficient + * and robust, and allows for online processing with arbitrary time-scaling + * factors that may be specified in a time-varying fashion and can be chosen + * over a wide continuous range of values. + *

+ *

+ * Inspired by the SoundTouch library by Olli Parviainen, + * http://www.surina.net/soundtouch, especially the TDStrech.cpp file. + *
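+ * A minimal usage sketch (the stream variable is assumed to be an existing
+ * TarsosDSPAudioInputStream; the values are only illustrative): create the WSOLA
+ * processor first, size the dispatcher from it, then register and run:
+
+WaveformSimilarityBasedOverlapAdd wsola = new WaveformSimilarityBasedOverlapAdd(Parameters.musicDefaults(1.2, 44100));
+AudioDispatcher dispatcher = new AudioDispatcher(stream, wsola.getInputBufferSize(), wsola.getOverlap());
+wsola.setDispatcher(dispatcher);
+dispatcher.addAudioProcessor(wsola);
+dispatcher.run();
+ *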

+ * @author Joren Six + * @author Olli Parviainen + */ +public class WaveformSimilarityBasedOverlapAdd implements AudioProcessor { + private int seekWindowLength; + private int seekLength; + private int overlapLength; + + private float[] pMidBuffer; + private float[] pRefMidBuffer; + private float[] outputFloatBuffer; + + private int intskip; + private int sampleReq; + + private double tempo; + + private AudioDispatcher dispatcher; + + private Parameters newParameters; + + /** + * Create a new instance based on algorithm parameters for a certain audio format. + * @param params The parameters for the algorithm. + */ + public WaveformSimilarityBasedOverlapAdd(Parameters params){ + setParameters(params); + applyNewParameters(); + } + + public void setParameters(Parameters params){ + newParameters = params; + } + + public void setDispatcher(AudioDispatcher newDispatcher){ + this.dispatcher = newDispatcher; + } + + private void applyNewParameters(){ + Parameters params = newParameters; + int oldOverlapLength = overlapLength; + overlapLength = (int) ((params.getSampleRate() * params.getOverlapMs())/1000); + seekWindowLength = (int) ((params.getSampleRate() * params.getSequenceMs())/1000); + seekLength = (int) ((params.getSampleRate() * params.getSeekWindowMs())/1000); + + tempo = params.getTempo(); + + //pMidBuffer and pRefBuffer are initialized with 8 times the needed length to prevent a reset + //of the arrays when overlapLength changes. + + if(overlapLength > oldOverlapLength * 8 && pMidBuffer==null){ + pMidBuffer = new float[overlapLength * 8]; //overlapLengthx2? + pRefMidBuffer = new float[overlapLength * 8];//overlapLengthx2? + System.out.println("New overlapLength" + overlapLength); + } + + double nominalSkip = tempo * (seekWindowLength - overlapLength); + intskip = (int) (nominalSkip + 0.5); + + sampleReq = Math.max(intskip + overlapLength, seekWindowLength) + seekLength; + + float[] prevOutputBuffer = outputFloatBuffer; + outputFloatBuffer = new float[getOutputBufferSize()]; + if(prevOutputBuffer!=null){ + System.out.println("Copy outputFloatBuffer contents"); + for(int i = 0 ; i < prevOutputBuffer.length && i < outputFloatBuffer.length ; i++){ + outputFloatBuffer[i] = prevOutputBuffer[i]; + } + } + + newParameters = null; + } + + public int getInputBufferSize(){ + return sampleReq; + } + + private int getOutputBufferSize(){ + return seekWindowLength - overlapLength; + } + + public int getOverlap(){ + return sampleReq-intskip; + } + + + /** + * Overlaps the sample in output with the samples in input. + * @param output The output buffer. + * @param input The input buffer. + */ + private void overlap(final float[] output, int outputOffset, float[] input,int inputOffset){ + for(int i = 0 ; i < overlapLength ; i++){ + int itemp = overlapLength - i; + output[i + outputOffset] = (input[i + inputOffset] * i + pMidBuffer[i] * itemp ) / overlapLength; + } + } + + + /** + * Seeks for the optimal overlap-mixing position. + * + * The best position is determined as the position where the two overlapped + * sample sequences are 'most alike', in terms of the highest + * cross-correlation value over the overlapping period + * + * @param inputBuffer The input buffer + * @param postion The position where to start the seek operation, in the input buffer. + * @return The best position. 
+ */ + private int seekBestOverlapPosition(float[] inputBuffer, int postion) { + int bestOffset; + double bestCorrelation, currentCorrelation; + int tempOffset; + + int comparePosition; + + // Slopes the amplitude of the 'midBuffer' samples + precalcCorrReferenceMono(); + + bestCorrelation = -10; + bestOffset = 0; + + // Scans for the best correlation value by testing each possible + // position + // over the permitted range. + for (tempOffset = 0; tempOffset < seekLength; tempOffset++) { + + comparePosition = postion + tempOffset; + + // Calculates correlation value for the mixing position + // corresponding + // to 'tempOffset' + currentCorrelation = (double) calcCrossCorr(pRefMidBuffer, inputBuffer,comparePosition); + // heuristic rule to slightly favor values close to mid of the + // range + double tmp = (double) (2 * tempOffset - seekLength) / seekLength; + currentCorrelation = ((currentCorrelation + 0.1) * (1.0 - 0.25 * tmp * tmp)); + + // Checks for the highest correlation value + if (currentCorrelation > bestCorrelation) { + bestCorrelation = currentCorrelation; + bestOffset = tempOffset; + } + } + + return bestOffset; + + } + + /** + * Slopes the amplitude of the 'midBuffer' samples so that cross correlation + * is faster to calculate. Why is this faster? + */ + void precalcCorrReferenceMono() + { + for (int i = 0; i < overlapLength; i++){ + float temp = i * (overlapLength - i); + pRefMidBuffer[i] = pMidBuffer[i] * temp; + } + } + + + double calcCrossCorr(float[] mixingPos, float[] compare, int offset){ + double corr = 0; + double norm = 0; + for (int i = 1; i < overlapLength; i ++){ + corr += mixingPos[i] * compare[i + offset]; + norm += mixingPos[i] * mixingPos[i]; + } + // To avoid division by zero. + if (norm < 1e-8){ + norm = 1.0; + } + return corr / Math.pow(norm,0.5); + } + + + @Override + public boolean process(AudioEvent audioEvent) { + float[] audioFloatBuffer = audioEvent.getFloatBuffer(); + assert audioFloatBuffer.length == getInputBufferSize(); + + //Search for the best overlapping position. + int offset = seekBestOverlapPosition(audioFloatBuffer,0); + + // Mix the samples in the 'inputBuffer' at position of 'offset' with the + // samples in 'midBuffer' using sliding overlapping + // ... first partially overlap with the end of the previous sequence + // (that's in 'midBuffer') + overlap(outputFloatBuffer,0,audioFloatBuffer,offset); + + //copy sequence samples from input to output + int sequenceLength = seekWindowLength - 2 * overlapLength; + System.arraycopy(audioFloatBuffer, offset + overlapLength, outputFloatBuffer, overlapLength, sequenceLength); + + // Copies the end of the current sequence from 'inputBuffer' to + // 'midBuffer' for being mixed with the beginning of the next + // processing sequence and so on + System.arraycopy(audioFloatBuffer, offset + sequenceLength + overlapLength, pMidBuffer, 0, overlapLength); + + assert outputFloatBuffer.length == getOutputBufferSize(); + + audioEvent.setFloatBuffer(outputFloatBuffer); + audioEvent.setOverlap(0); + + if(newParameters!=null){ + applyNewParameters(); + dispatcher.setStepSizeAndOverlap(getInputBufferSize(),getOverlap()); + } + + return true; + } + + @Override + public void processingFinished() { + // NOOP + } + + + + /** + * An object to encapsulate some of the parameters for + * WSOLA, together with a couple of practical helper functions. 
+ * + * @author Joren Six + */ + public static class Parameters { + private final int sequenceMs; + private final int seekWindowMs; + private final int overlapMs; + + private final double tempo; + private final double sampleRate; + + /** + * @param tempo + * The tempo change 1.0 means unchanged, 2.0 is + 100% , 0.5 + * is half of the speed. + * @param sampleRate + * The sample rate of the audio 44.1kHz is common. + * @param newSequenceMs + * Length of a single processing sequence, in milliseconds. + * This determines to how long sequences the original sound + * is chopped in the time-stretch algorithm. + * + * The larger this value is, the lesser sequences are used in + * processing. In principle a bigger value sounds better when + * slowing down tempo, but worse when increasing tempo and + * vice versa. + * + * Increasing this value reduces computational burden and vice + * versa. + * @param newSeekWindowMs + * Seeking window length in milliseconds for algorithm that + * finds the best possible overlapping location. This + * determines from how wide window the algorithm may look for + * an optimal joining location when mixing the sound + * sequences back together. + * + * The bigger this window setting is, the higher the + * possibility to find a better mixing position will become, + * but at the same time large values may cause a "drifting" + * artifact because consequent sequences will be taken at + * more uneven intervals. + * + * If there's a disturbing artifact that sounds as if a + * constant frequency was drifting around, try reducing this + * setting. + * + * Increasing this value increases computational burden and + * vice versa. + * @param newOverlapMs + * Overlap length in milliseconds. When the chopped sound + * sequences are mixed back together, to form a continuous + * sound stream, this parameter defines over how long period + * the two consecutive sequences are let to overlap each + * other. + * + * This shouldn't be that critical parameter. If you reduce + * the DEFAULT_SEQUENCE_MS setting by a large amount, you + * might wish to try a smaller value on this. + * + * Increasing this value increases computational burden and + * vice versa. 
+ */ + public Parameters(double tempo, double sampleRate, int newSequenceMs, int newSeekWindowMs, int newOverlapMs) { + this.tempo = tempo; + this.sampleRate = sampleRate; + this.overlapMs = newOverlapMs; + this.seekWindowMs = newSeekWindowMs; + this.sequenceMs = newSequenceMs; + } + + public static Parameters speechDefaults(double tempo, double sampleRate){ + int sequenceMs = 40; + int seekWindowMs = 15; + int overlapMs = 12; + return new Parameters(tempo,sampleRate,sequenceMs, seekWindowMs,overlapMs); + } + + public static Parameters musicDefaults(double tempo, double sampleRate){ + int sequenceMs = 82; + int seekWindowMs = 28; + int overlapMs = 12; + return new Parameters(tempo,sampleRate,sequenceMs, seekWindowMs,overlapMs); + } + + public static Parameters slowdownDefaults(double tempo, double sampleRate){ + int sequenceMs = 100; + int seekWindowMs = 35; + int overlapMs = 20; + return new Parameters(tempo,sampleRate,sequenceMs, seekWindowMs,overlapMs); + } + + public static Parameters automaticDefaults(double tempo, double sampleRate){ + double tempoLow = 0.5; // -50% speed + double tempoHigh = 2.0; // +100% speed + + double sequenceMsLow = 125; //ms + double sequenceMsHigh = 50; //ms + double sequenceK = ((sequenceMsHigh - sequenceMsLow) / (tempoHigh - tempoLow)); + double sequenceC = sequenceMsLow - sequenceK * tempoLow; + + double seekLow = 25;// ms + double seekHigh = 15;// ms + double seekK =((seekHigh - seekLow) / (tempoHigh-tempoLow)); + double seekC = seekLow - seekK * seekLow; + + int sequenceMs = (int) (sequenceC + sequenceK * tempo + 0.5); + int seekWindowMs = (int) (seekC + seekK * tempo + 0.5); + int overlapMs = 12; + return new Parameters(tempo,sampleRate,sequenceMs, seekWindowMs,overlapMs); + } + + public double getOverlapMs() { + return overlapMs; + } + + public double getSequenceMs() { + return sequenceMs; + } + + public double getSeekWindowMs() { + return seekWindowMs; + } + + public double getSampleRate() { + return sampleRate; + } + + public double getTempo(){ + return tempo; + } + } +} diff --git a/app/src/main/java/be/tarsos/dsp/ZeroCrossingRateProcessor.java b/app/src/main/java/be/tarsos/dsp/ZeroCrossingRateProcessor.java new file mode 100644 index 0000000..a784a5e --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/ZeroCrossingRateProcessor.java @@ -0,0 +1,58 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +package be.tarsos.dsp; + +/** + * Calculates the zero crossing rate for a frame. 
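+ * The rate is the fraction of adjacent sample pairs whose product is negative,
+ * i.e. the number of sign changes divided by (buffer length - 1).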
+ * @author Joren Six + * + */ +public class ZeroCrossingRateProcessor implements AudioProcessor{ + + private float zeroCrossingRate = 0; + @Override + public boolean process(AudioEvent audioEvent) { + float[] buffer = audioEvent.getFloatBuffer(); + + int numberOfZeroCrossings = 0; + for(int i = 1 ; i < buffer.length ; i++){ + if(buffer[i] * buffer[i-1] < 0){ + numberOfZeroCrossings++; + } + } + + zeroCrossingRate = numberOfZeroCrossings / (float) (buffer.length - 1); + + return true; + } + + public float getZeroCrossingRate(){ + return zeroCrossingRate; + } + + @Override + public void processingFinished() { + } + +} diff --git a/app/src/main/java/be/tarsos/dsp/beatroot/Agent.java b/app/src/main/java/be/tarsos/dsp/beatroot/Agent.java new file mode 100644 index 0000000..4fef3f3 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/beatroot/Agent.java @@ -0,0 +1,386 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +/* BeatRoot: An interactive beat tracking system + Copyright (C) 2001, 2006 by Simon Dixon + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program (the file gpl.txt); if not, download it from + http://www.gnu.org/licenses/gpl.txt or write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +*/ + +package be.tarsos.dsp.beatroot; + +import java.util.ListIterator; + + +/** Agent is the central class for beat tracking. + * Each Agent object has a tempo hypothesis, a history of tracked beats, and + * a score evaluating the continuity, regularity and salience of its beat track. + */ +public class Agent { + + /** Print debugging information */ + public static boolean debug = false; + + /** The maximum amount by which a beat can be later than the predicted beat time, + * expressed as a fraction of the beat period. */ + public static double POST_MARGIN_FACTOR = 0.3; + + /** The maximum amount by which a beat can be earlier than the predicted beat time, + * expressed as a fraction of the beat period. */ + public static double PRE_MARGIN_FACTOR = 0.15; + + /** The default value of innerMargin, which is the maximum time (in seconds) that a + * beat can deviate from the predicted beat time without a fork occurring. 
*/ + public static final double INNER_MARGIN = 0.040; + + /** The maximum allowed deviation from the initial tempo, expressed as a fraction of the initial beat period. */ + public static double MAX_CHANGE = 0.2; + + /** The slope of the penalty function for onsets which do not coincide precisely with predicted beat times. */ + public static double CONF_FACTOR = 0.5; + + /** The reactiveness/inertia balance, i.e. degree of change in the tempo, is controlled by the correctionFactor + * variable. This constant defines its default value, which currently is not subsequently changed. The + * beat period is updated by the reciprocal of the correctionFactor multiplied by the difference between the + * predicted beat time and matching onset. */ + public static final double DEFAULT_CORRECTION_FACTOR = 50.0; + + /** The default value of expiryTime, which is the time (in seconds) after which an Agent that + * has no Event matching its beat predictions will be destroyed. */ + public static final double DEFAULT_EXPIRY_TIME = 10.0; + + /** The identity number of the next created Agent */ + protected static int idCounter = 0; + + /** The maximum time (in seconds) that a beat can deviate from the predicted beat time + * without a fork occurring (i.e. a 2nd Agent being created). */ + protected static double innerMargin; + + /** Controls the reactiveness/inertia balance, i.e. degree of change in the tempo. The + * beat period is updated by the reciprocal of the correctionFactor multiplied by the difference between the + * predicted beat time and matching onset. */ + protected static double correctionFactor; + + /** The time (in seconds) after which an Agent that + * has no Event matching its beat predictions will be destroyed. */ + protected static double expiryTime; + + /** For scoring Agents in a (non-existent) real-time version (otherwise not used). */ + protected static double decayFactor; + + /** The size of the outer half-window before the predicted beat time. */ + public double preMargin; + + /** The size of the outer half-window after the predicted beat time. */ + public double postMargin; + + /** The Agent's unique identity number. */ + protected int idNumber; + + /** To be used in real-time version?? */ + public double tempoScore; + + /** Sum of salience values of the Events which have been interpreted + * as beats by this Agent, weighted by their nearness to the predicted beat times. */ + public double phaseScore; + + /** How long has this agent been the best? For real-time version; otherwise not used. */ + public double topScoreTime; + + /** The number of beats found by this Agent, including interpolated beats. */ + public int beatCount; + + /** The current tempo hypothesis of the Agent, expressed as the beat period in seconds. */ + public double beatInterval; + + /** The initial tempo hypothesis of the Agent, expressed as the beat period in seconds. */ + public double initialBeatInterval; + + /** The time of the most recent beat accepted by this Agent. */ + public double beatTime; + + /** The list of Events (onsets) accepted by this Agent as beats, plus interpolated beats. */ + public EventList events; + + /** Constructor: the work is performed by init() + * @param ibi The beat period (inter-beat interval) of the Agent's tempo hypothesis. + */ + public Agent(double ibi) { + init(ibi); + } // constructor + + /** Copy constructor. + * @param clone The Agent to duplicate. 
*/ + public Agent(Agent clone) { + idNumber = idCounter++; + phaseScore = clone.phaseScore; + tempoScore = clone.tempoScore; + topScoreTime = clone.topScoreTime; + beatCount = clone.beatCount; + beatInterval = clone.beatInterval; + initialBeatInterval = clone.initialBeatInterval; + beatTime = clone.beatTime; + events = new EventList(clone.events); + postMargin = clone.postMargin; + preMargin = clone.preMargin; + } // copy constructor + + /** Initialise all the fields of this Agent. + * @param ibi The initial tempo hypothesis of the Agent. + */ + protected void init(double ibi) { + innerMargin = INNER_MARGIN; + correctionFactor = DEFAULT_CORRECTION_FACTOR; + expiryTime = DEFAULT_EXPIRY_TIME; + decayFactor = 0; + beatInterval = ibi; + initialBeatInterval = ibi; + postMargin = ibi * POST_MARGIN_FACTOR; + preMargin = ibi * PRE_MARGIN_FACTOR; + idNumber = idCounter++; + phaseScore = 0.0; + tempoScore = 0.0; + topScoreTime = 0.0; + beatCount = 0; + beatTime = -1.0; + events = new EventList(); + } // init() + + /** Output debugging information about this Agent, at the default (highest) level of detail. */ + public void print() { + print(100); + } // print()/0 + + /** Output debugging information about this Agent. + * @param level The level of detail in debugging + */ + public void print(int level) { + System.out.printf("\tAg#%4d: %5.3f", idNumber, beatInterval); + if (level >= 1) { + System.out.printf( + " Beat#%3d Time=%7.3f Score=%4.2f:P%4.2f:%3.1f", + beatCount, beatTime, tempoScore, phaseScore, + topScoreTime); + } + if (level >= 2) + System.out.println(); + if (level >= 3) + events.print(); + } // print() + + /** Accept a new Event as a beat time, and update the state of the Agent accordingly. + * @param e The Event which is accepted as being on the beat. + * @param err The difference between the predicted and actual beat times. + * @param beats The number of beats since the last beat that matched an Event. + */ + protected void accept(Event e, double err, int beats) { + beatTime = e.keyDown; + events.add(e); + if (Math.abs(initialBeatInterval - beatInterval - + err / correctionFactor) < MAX_CHANGE * initialBeatInterval) + beatInterval += err / correctionFactor;// Adjust tempo + beatCount += beats; + double conFactor = 1.0 - CONF_FACTOR * err / + (err>0? postMargin: -preMargin); + if (decayFactor > 0) { + double memFactor = 1. - 1. / threshold((double)beatCount,1,decayFactor); + phaseScore = memFactor * phaseScore + + (1.0 - memFactor) * conFactor * e.salience; + } else + phaseScore += conFactor * e.salience; + if (debug) { + print(1); + System.out.printf(" Err=" + (err<0?"":"+") + "%5.3f" + + (Math.abs(err) > innerMargin ? '*':' ') + "%5.3f\n", + err, conFactor); + } + } // accept() + + private double threshold(double value, double min, double max) { + if (value < min) + return min; + if (value > max) + return max; + return value; + } + + + /** The given Event is tested for a possible beat time. The following situations can occur: + * 1) The Agent has no beats yet; the Event is accepted as the first beat. + * 2) The Event is beyond expiryTime seconds after the Agent's last 'confirming' beat; the Agent is terminated. + * 3) The Event is within the innerMargin of the beat prediction; it is accepted as a beat. + * 4) The Event is within the outerMargin's of the beat prediction; it is accepted as a beat by this Agent, + * and a new Agent is created which doesn't accept it as a beat. + * 5) The Event is ignored because it is outside the windows around the Agent's predicted beat time. 
+ * @param e The Event to be tested + * @param a The list of all agents, which is updated if a new agent is created. + * @return Indicate whether the given Event was accepted as a beat by this Agent. + */ + public boolean considerAsBeat(Event e, AgentList a) { + double err; + if (beatTime < 0) { // first event + accept(e, 0, 1); + return true; + } else { // subsequent events + if (e.keyDown - events.l.getLast().keyDown > expiryTime) { + phaseScore = -1.0; // flag agent to be deleted + return false; + } + double beats = Math.round((e.keyDown - beatTime) / beatInterval); + err = e.keyDown - beatTime - beats * beatInterval; + if ((beats > 0) && (-preMargin <= err) && (err <= postMargin)) { + if (Math.abs(err) > innerMargin) // Create new agent that skips this + a.add(new Agent(this)); // event (avoids large phase jump) + accept(e, err, (int)beats); + return true; + } + } + return false; + } // considerAsBeat() + + /** Interpolates missing beats in the Agent's beat track, starting from the beginning of the piece. */ + protected void fillBeats() { + fillBeats(-1.0); + } // fillBeats()/0 + + /** Interpolates missing beats in the Agent's beat track. + * @param start Ignore beats earlier than this start time + */ + public void fillBeats(double start) { + double prevBeat = 0, nextBeat, currentInterval, beats; + ListIterator list = events.listIterator(); + if (list.hasNext()) { + prevBeat = list.next().keyDown; + // alt. to fill from 0: + // prevBeat = Math.mod(list.next().keyDown, beatInterval); + list.previous(); + } + for ( ; list.hasNext(); list.next()) { + nextBeat = list.next().keyDown; + list.previous(); + beats = Math.round((nextBeat - prevBeat) / beatInterval - 0.01); //prefer slow + currentInterval = (nextBeat - prevBeat) / beats; + for ( ; (nextBeat > start) && (beats > 1.5); beats--) { + prevBeat += currentInterval; + if (debug) + System.out.printf("Insert beat at: %8.3f (n=%1.0f)\n", + prevBeat, beats - 1.0); + list.add(newBeat(prevBeat, 0)); // more than once OK?? + } + prevBeat = nextBeat; + } + } // fillBeats() + + /** Creates a new Event object representing a beat. + * @param time The time of the beat in seconds + * @param beatNum The index of the beat + * @return The Event object representing the beat + */ + private Event newBeat(double time, int beatNum) { + return new Event(time,time, time, 56, 64, beatNum, 0, 1); + } // newBeat() + + /** Show detailed debugging output describing the beat tracking behaviour of this agent. + * Calls showTracking()/1 with a default metrical level of 1. + * @param allEvents An EventList of all onsets + */ + public void showTracking(EventList allEvents) { + showTracking(allEvents, 1.0); + } // showTracking()/1 + + /** Show detailed debugging output describing the beat tracking behaviour of this agent. 
+ * @param allEvents An EventList of all onsets + * @param level The metrical level of beat tracking relative to the notated beat (used to count beats) + */ + public void showTracking(EventList allEvents, double level) { + int count = 1, gapCount; + double prevBeat, nextBeat, gap; + ListIterator beats = events.listIterator(); // point to 1st beat + ListIterator all = allEvents.listIterator(); // point to 1st event + if (!beats.hasNext()) { + System.err.println("No beats found"); + return; + } + prevBeat = events.l.getFirst().keyDown; + // prevBeat = fmod(beats.next().keyDown, beatInterval); + System.out.print("Beat (IBI) BeatTime Other Events"); + boolean first = true; + while (all.hasNext()) { // print each real event + Event currentEvent = all.next(); + Event currentBeat = null; + while (beats.hasNext()) { // if event was chosen as beat + currentBeat = beats.next(); + if (currentBeat.keyDown > currentEvent.keyDown + Induction.clusterWidth) + break; + gap = currentBeat.keyDown - prevBeat; + gapCount = (int) Math.round(gap / beatInterval); + for (int j = 1; j < gapCount; j++) { //empty beat(s) before event + nextBeat = prevBeat + gap / gapCount; + System.out.printf("\n%4d (%5.3f) [%7.3f ]", + count++, nextBeat - prevBeat, nextBeat); + prevBeat = nextBeat; + } + System.out.printf("\n%4d (%5.3f) ", + count++, currentEvent.keyDown - prevBeat); + prevBeat = currentBeat.keyDown; + currentBeat = null; + first = false; + } + if ((currentBeat != null) && (currentBeat.keyDown > currentEvent.keyDown)) { + gap = currentBeat.keyDown - prevBeat; + gapCount = (int) Math.round(gap / beatInterval); + for (int j = 1; j < gapCount; j++) { //empty beat(s) before event + nextBeat = prevBeat + gap / gapCount; + if (nextBeat >= currentEvent.keyDown) + break; + System.out.printf("\n%4d (%5.3f) [%7.3f ]", + count++, nextBeat - prevBeat, nextBeat); + prevBeat = nextBeat; + } + first = false; + } + if (first) // for correct formatting of any initial (pre-beat) events + System.out.print("\n "); + System.out.printf("%8.3f%c ", currentEvent.keyDown, + Math.abs(currentEvent.scoreBeat / level - + Math.round(currentEvent.scoreBeat / level)) < 0.001? + '*': ' '); + first = false; + } + System.out.println(); + } // showTracking() + +} // class Agent diff --git a/app/src/main/java/be/tarsos/dsp/beatroot/AgentList.java b/app/src/main/java/be/tarsos/dsp/beatroot/AgentList.java new file mode 100644 index 0000000..7775635 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/beatroot/AgentList.java @@ -0,0 +1,278 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. 
+* +*/ + +/* BeatRoot: An interactive beat tracking system + Copyright (C) 2001, 2006 by Simon Dixon + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program (the file gpl.txt); if not, download it from + http://www.gnu.org/licenses/gpl.txt or write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +*/ + +package be.tarsos.dsp.beatroot; + +import java.util.ListIterator; + + +/** Class for maintaining the set of all Agents involved in beat tracking a piece of music. + * Implements a simple linked list terminated by an AgentList with a null Agent (ag). + */ +public class AgentList { + + /** Flag for choice between sum and average beat salience values for Agent scores. + * The use of summed saliences favours faster tempi or lower metrical levels. */ + public static boolean useAverageSalience = false; + + /** Flag for printing debugging output. */ + public static boolean debug = false; + + /** For the purpose of removing duplicate agents, the default JND of IBI */ + public static final double DEFAULT_BI = 0.02; + + /** For the purpose of removing duplicate agents, the default JND of phase */ + public static final double DEFAULT_BT = 0.04; + + /** A beat tracking Agent */ + public Agent ag; + + /** The remainder of the linked list */ + public AgentList next; + + /** The length of the list (number of beat tracking Agents) */ + public static int count = 0; + + /** For the purpose of removing duplicate agents, the JND of IBI. + * Not changed in the current version. */ + public static double thresholdBI = DEFAULT_BI; + + /** For the purpose of removing duplicate agents, the JND of phase. + * Not changed in the current version. */ + public static double thresholdBT = DEFAULT_BT; + + /** Default constructor */ + public AgentList() { + this(null, null); + } + + /** Constructor for an AgentList: the Agent a is prepended to the list al. 
+ * @param a The Agent at the head of the list + * @param al The tail of the list + */ + public AgentList(Agent a, AgentList al) { + ag = a; + next = al; + if (next == null) { + if (ag != null) + next = new AgentList(); // insert null-terminator if it was forgotten + else { + count = 0; + thresholdBI = DEFAULT_BI; + thresholdBT = DEFAULT_BT; + } + } + } // constructor + + /** Deep print of AgentList for debugging */ + public void print() { + System.out.println("agentList.print: (size=" + count + ")"); + for (AgentList ptr = this; ptr.ag != null; ptr = ptr.next) + ptr.ag.print(2); + System.out.println("End of agentList.print()"); + } // print() + + /** + * Inserts newAgent into the list in ascending order of beatInterval + * @param a The new agent to add + */ + public void add(Agent a) { + add(a, true); + } // add()/1 + + /** Appends newAgent to list (sort==false), or inserts newAgent into the list + * in ascending order of beatInterval + * @param newAgent The agent to be added to the list + * @param sort Flag indicating whether the list is sorted or not + */ + public void add(Agent newAgent, boolean sort){ + if (newAgent == null) + return; + AgentList ptr; + count++; + for (ptr = this; ptr.ag != null; ptr = ptr.next) + if (sort && (newAgent.beatInterval <= ptr.ag.beatInterval)) { + ptr.next = new AgentList(ptr.ag, ptr.next); + ptr.ag = newAgent; + return; + } + ptr.next = new AgentList(); + ptr.ag = newAgent; + } // add()/2 + + /** Sorts the AgentList by increasing beatInterval, using a bubble sort + * since it is assumed that the list is almost sorted. */ + public void sort() { + boolean sorted = false; + while (!sorted) { + sorted = true; + for (AgentList ptr = this; ptr.ag != null; ptr = ptr.next) { + if ((ptr.next.ag != null) && + (ptr.ag.beatInterval > ptr.next.ag.beatInterval)) { + Agent temp = ptr.ag; + ptr.ag = ptr.next.ag; + ptr.next.ag = temp; + sorted = false; + } + } // for + } // while + } // sort() + + /** Removes the current item from the list. + * The current item does not need to be the head of the whole list. + * @param ptr Points to the Agent which is removed from the list + */ + public void remove(AgentList ptr) { + count--; + ptr.ag = ptr.next.ag; // null-terminated list always has next + ptr.next = ptr.next.next; + } // remove() + + /** Removes Agents from the list which are duplicates of other Agents. + * A duplicate is defined by the tempo and phase thresholds + * thresholdBI and thresholdBT respectively. + */ + protected void removeDuplicates() { + sort(); + for (AgentList ptr = this; ptr.ag != null; ptr = ptr.next) { + if (ptr.ag.phaseScore < 0.0) // already flagged for deletion + continue; + for (AgentList ptr2 = ptr.next; ptr2.ag != null; ptr2 = ptr2.next) { + if (ptr2.ag.beatInterval - ptr.ag.beatInterval > thresholdBI) + break; + if (Math.abs(ptr.ag.beatTime - ptr2.ag.beatTime) > thresholdBT) + continue; + if (ptr.ag.phaseScore < ptr2.ag.phaseScore) { + ptr.ag.phaseScore = -1.0; // flag for deletion + if (ptr2.ag.topScoreTime < ptr.ag.topScoreTime) + ptr2.ag.topScoreTime = ptr.ag.topScoreTime; + break; + } else { + ptr2.ag.phaseScore = -1.0; // flag for deletion + if (ptr.ag.topScoreTime < ptr2.ag.topScoreTime) + ptr.ag.topScoreTime = ptr2.ag.topScoreTime; + } + } + } + for (AgentList ptr = this; ptr.ag != null; ) { + if (ptr.ag.phaseScore < 0.0) { + remove(ptr); + } else + ptr = ptr.next; + } + } // removeDuplicates() + + /** Perform beat tracking on a list of events (onsets). 
+ * @param el The list of onsets (or events or peaks) to beat track + */ + public void beatTrack(EventList el) { + beatTrack(el, -1.0); + } // beatTrack()/1 + + /** Perform beat tracking on a list of events (onsets). + * @param el The list of onsets (or events or peaks) to beat track. + * @param stop Do not find beats after stop seconds. + */ + public void beatTrack(EventList el, double stop) { + ListIterator ptr = el.listIterator(); + boolean phaseGiven = (ag != null) && + (ag.beatTime >= 0); // if given for one, assume given for others + while (ptr.hasNext()) { + Event ev = ptr.next(); + if ((stop > 0) && (ev.keyDown > stop)) + break; + boolean created = phaseGiven; + double prevBeatInterval = -1.0; + for (AgentList ap = this; ap.ag != null; ap = ap.next) { + Agent currentAgent = ap.ag; + if (currentAgent.beatInterval != prevBeatInterval) { + if ((prevBeatInterval>=0) && !created && (ev.keyDown<5.0)) { + // Create new agent with different phase + Agent newAgent = new Agent(prevBeatInterval); + newAgent.considerAsBeat(ev, this); + add(newAgent); + } + prevBeatInterval = currentAgent.beatInterval; + created = phaseGiven; + } + if (currentAgent.considerAsBeat(ev, this)) + created = true; + if (currentAgent != ap.ag) // new one been inserted, skip it + ap = ap.next; + } // loop for each agent + removeDuplicates(); + } // loop for each event + } // beatTrack() + + /** Finds the Agent with the highest score in the list. + * @return The Agent with the highest score + */ + public Agent bestAgent() { + double best = -1.0; + Agent bestAg = null; + for (AgentList ap = this; ap.ag != null; ap = ap.next) { + double startTime = ap.ag.events.l.getFirst().keyDown; + double conf = (ap.ag.phaseScore + ap.ag.tempoScore) / + (useAverageSalience? (double)ap.ag.beatCount: 1.0); + if (conf > best) { + bestAg = ap.ag; + best = conf; + } + if (debug) { + ap.ag.print(0); + System.out.printf(" +%5.3f Av-salience = %3.1f\n", + startTime, conf); + } + } + if (debug) { + if (bestAg != null) { + System.out.print("Best "); + bestAg.print(0); + System.out.printf(" Av-salience = %5.1f\n", best); + // bestAg.events.print(); + } else + System.out.println("No surviving agent - beat tracking failed"); + } + return bestAg; + } // bestAgent() + +} // class AgentList diff --git a/app/src/main/java/be/tarsos/dsp/beatroot/BeatRootOnsetEventHandler.java b/app/src/main/java/be/tarsos/dsp/beatroot/BeatRootOnsetEventHandler.java new file mode 100644 index 0000000..628477f --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/beatroot/BeatRootOnsetEventHandler.java @@ -0,0 +1,92 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +package be.tarsos.dsp.beatroot; + +import java.util.Iterator; + +import be.tarsos.dsp.onsets.OnsetHandler; + +/** + * Forms a bridge between the BeatRoot beat tracking system and an + * interchangeable onset detector. 
The beat tracker does not work in real-time. + * First all onsets need to be detected. In a post-processing step a beat + * estimation is done using reocurring inter onset intervals (IOI's). To return + * the time of the beats an OnsetHandler is abused. + * + * @author Joren Six + */ +public class BeatRootOnsetEventHandler implements OnsetHandler { + + private final EventList onsetList = new EventList(); + + @Override + public void handleOnset(double time, double salience) { + double roundedTime = Math.round(time *100 )/100.0; + Event e = newEvent(roundedTime,0); + e.salience = salience; + onsetList.add(e); + } + + + /** + * Creates a new Event object representing an onset or beat. + * + * @param time + * The time of the beat in seconds + * @param beatNum + * The index of the beat or onset. + * @return The Event object representing the beat or onset. + */ + private Event newEvent(double time, int beatNum) { + return new Event(time,time, time, 56, 64, beatNum, 0, 1); + } + + /** + * Guess the beats using the populated list of onsets. + * + * @param beatHandler + * Use this handler to get the time of the beats. The salience of + * the beat is not calculated: -1 is returned. + */ + public void trackBeats(OnsetHandler beatHandler){ + AgentList agents = null; + // tempo not given; use tempo induction + agents = Induction.beatInduction(onsetList); + agents.beatTrack(onsetList, -1); + Agent best = agents.bestAgent(); + if (best != null) { + best.fillBeats(-1.0); + EventList beats = best.events; + Iterator eventIterator = beats.iterator(); + while(eventIterator.hasNext()){ + Event beat = eventIterator.next(); + double time = beat.keyDown; + beatHandler.handleOnset(time, -1); + } + } else { + System.err.println("No best agent"); + } + } + +} diff --git a/app/src/main/java/be/tarsos/dsp/beatroot/Event.java b/app/src/main/java/be/tarsos/dsp/beatroot/Event.java new file mode 100644 index 0000000..e3b8f2a --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/beatroot/Event.java @@ -0,0 +1,123 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +/* + Copyright (C) 2001, 2006 by Simon Dixon + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License along + with this program (the file gpl.txt); if not, download it from + http://www.gnu.org/licenses/gpl.txt or write to the + Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +*/ + +package be.tarsos.dsp.beatroot; + +/** + * A beatroot event + */ +public class Event implements Comparable, Cloneable { + + public double keyDown, keyUp, pedalUp, scoreBeat, scoreDuration, salience; + public int midiPitch, midiVelocity, flags, midiCommand, midiChannel, + midiTrack; + //public String label; + + public Event(double onset, double offset, double eOffset, int pitch, + int velocity, double beat, double duration, int eventFlags, + int command, int channel, int track) { + this(onset, offset, eOffset, pitch, velocity, beat,duration,eventFlags); + midiCommand = command; + midiChannel = channel; + midiTrack = track; + } // constructor + + public Event(double onset, double offset, double eOffset, int pitch, + int velocity, double beat, double duration, int eventFlags) { + keyDown = onset; + keyUp = offset; + pedalUp = eOffset; + midiPitch = pitch; + midiVelocity = velocity; + scoreBeat = beat; + scoreDuration = duration; + flags = eventFlags; + midiCommand = 144;//javax.sound.midi.ShortMessage.NOTE_ON; + midiChannel = 1; + midiTrack = 0; + salience = 0; + } // constructor + + public Event clone() { + return new Event(keyDown, keyUp, pedalUp, midiPitch, midiVelocity, + scoreBeat, scoreDuration, flags, midiCommand, midiChannel, + midiTrack); + } // clone() + + // Interface Comparable + public int compareTo(Event e) { + return (int)Math.signum(keyDown - e.keyDown); + } // compareTo() + + public String toString() { + return "n=" + midiPitch + " v=" + midiVelocity + " t=" + keyDown + + " to " + keyUp + " (" + pedalUp + ")"; + } // toString() + + public void print(Flags f) { + System.out.print("Event:\n"); + System.out.printf("\tkeyDown / Up / pedalUp: %5.3f / %5.3f / %5.3f\n", + keyDown, keyUp, pedalUp); + //System.out.printf("\tkeyUp: %5.3f\n", keyUp); + //System.out.printf("\tpedalUp: %5.3f\n", pedalUp); + System.out.printf("\tmidiPitch: %d\n", midiPitch); + System.out.printf("\tmidiVelocity: %d\n", midiVelocity); + System.out.printf("\tmidiCommand: %02x\t", midiCommand | midiChannel); + //System.out.printf("\tmidiChannel: %d\n", midiChannel); + System.out.printf("\tmidiTrack: %d\n", midiTrack); + System.out.printf("\tsalience: %5.3f\t", salience); + System.out.printf("\tscoreBeat: %5.3f\t", scoreBeat); + System.out.printf("\tscoreDuration: %5.3f\n", scoreDuration); + System.out.printf("\tflags: %X", flags); + if (f != null) { + int ff = flags; + for (int i=0; ff != 0; i++) { + if (ff % 2 == 1) + System.out.print(" " + f.getLabel(i)); + ff >>>= 1; + } + } + System.out.print("\n\n"); + } // print() + +} // class Event diff --git a/app/src/main/java/be/tarsos/dsp/beatroot/EventList.java b/app/src/main/java/be/tarsos/dsp/beatroot/EventList.java new file mode 100644 index 0000000..071e1f1 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/beatroot/EventList.java @@ -0,0 +1,433 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* 
------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +/* + Copyright (C) 2001, 2006 by Simon Dixon + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program (the file gpl.txt); if not, download it from + http://www.gnu.org/licenses/gpl.txt or write to the + Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +*/ + +package be.tarsos.dsp.beatroot; + +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.ListIterator; + + + +// Adapted from eventList::readMatchFile in beatroot/src/eventMidi.cpp + +// Reads in a Prolog score+performance (.match) file; returns it as an eventList +// Lines in the match file can be of the form: +// hammer_bounce-PlayedNote. +// info(Attribute, Value). +// insertion-PlayedNote. +// ornament(Anchor)-PlayedNote. +// ScoreNote-deletion. +// ScoreNote-PlayedNote. +// ScoreNote-trailing_score_note. +// trailing_played_note-PlayedNote. +// trill(Anchor)-PlayedNote. +// where ScoreNote is of the form +// snote(Anchor,[NoteName,Modifier],Octave,Bar:Beat,Offset,Duration, +// BeatNumber,DurationInBeats,ScoreAttributesList) +// e.g. snote(n1,[b,b],5,1:1,0,3/16,0,0.75,[s]) +// and PlayedNote is of the form +// note(Number,[NoteName,Modifier],Octave,Onset,Offset,AdjOffset,Velocity) +// e.g. note(1,[a,#],5,5054,6362,6768,53) + +class WormFileParseException extends RuntimeException { + + static final long serialVersionUID = 0; + public WormFileParseException(String s) { + super(s); + } // constructor + +} // class WormFileParseException + +class MatchFileParseException extends RuntimeException { + + static final long serialVersionUID = 0; + public MatchFileParseException(String s) { + super(s); + } // constructor + +} // class MatchFileParseException + +class BTFileParseException extends RuntimeException { + + static final long serialVersionUID = 0; + public BTFileParseException(String s) { + super(s); + } // constructor + +} // class BTFileParseException + + +// Process the strings which label extra features of notes in match files. +// We assume no more than 32 distinct labels in a file. 
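+// For example (illustrative): the first distinct label passed to getFlag()
+// is assigned bit 0 (value 1), the second bit 1 (value 2), and so on; a label
+// that was seen before returns the bit it was already given, and getLabel(i)
+// maps a bit index back to its label string.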
+class Flags { + + String[] labels = new String[32]; + int size = 0; + + int getFlag(String s) { + if ((s == null) || s.equals("")) + return 0; + //int val = 1; + for (int i = 0; i < size; i++) + if (s.equals(labels[i])) + return 1 << i; + if (size == 32) { + System.err.println("Overflow: Too many flags: " + s); + size--; + } + labels[size] = s; + return 1 << size++; + } // getFlag() + + String getLabel(int i) { + if (i >= size) + return "ERROR: Unknown flag"; + return labels[i]; + } // getLabel() + +} // class Flags + +/** + * A score/match/midi file is represented as an EventList object, + * which contains pointers to the head and tail links, and some + * class-wide parameters. Parameters are class-wide, as it is + * assumed that the Worm has only one input file at a time. + */ +public class EventList { + + public LinkedList l; + + private static boolean timingCorrection = false; + private static double timingDisplacement = 0; + private static final int clockUnits = 480; + private static final int clockRate = 500000; + private static final double metricalLevel = 0; + private static final double UNKNOWN = Double.NaN; + private static final boolean noMelody = false; + private static final boolean onlyMelody = false; + private static final Flags flags = new Flags(); + + public EventList() { + l = new LinkedList(); + } // constructor + + public EventList(EventList e) { + this(); + ListIterator it = e.listIterator(); + while (it.hasNext()) + add(it.next()); + } // constructor + + public EventList(Event[] e) { + this(); + for (int i=0; i < e.length; i++) + add(e[i]); + } // constructor + + public void add(Event e) { + l.add(e); + } // add() + + public void add(EventList ev) { + l.addAll(ev.l); + } // add() + + public void insert(Event newEvent, boolean uniqueTimes) { + ListIterator li = l.listIterator(); + while (li.hasNext()) { + int sgn = newEvent.compareTo(li.next()); + if (sgn < 0) { + li.previous(); + break; + } else if (uniqueTimes && (sgn == 0)) { + li.remove(); + break; + } + } + li.add(newEvent); + } // insert() + + public ListIterator listIterator() { + return l.listIterator(); + } // listIterator() + + public Iterator iterator() { + return l.iterator(); + } // iterator() + + public int size() { + return l.size(); + } // size() + + public Event[] toArray() { + return toArray(0); + } // toArray() + + public double[] toOnsetArray() { + double[] d = new double[l.size()]; + int i = 0; + for (Iterator it = l.iterator(); it.hasNext(); i++) + d[i] = it.next().keyDown; + return d; + } // toOnsetArray() + + public Event[] toArray(int match) { + int count = 0; + for (Event e : l) + if ((match == 0) || (e.midiCommand == match)) + count++; + Event[] a = new Event[count]; + int i = 0; + for (Event e : l) + if ((match == 0) || (e.midiCommand == match)) + a[i++] = e; + return a; + } // toArray() + + public void writeBinary(String fileName) { + try { + ObjectOutputStream oos = new ObjectOutputStream( + new FileOutputStream(fileName)); + oos.writeObject(this); + oos.close(); + } catch (IOException e) { + System.err.println(e); + } + } // writeBinary() + + public static EventList readBinary(String fileName) { + try { + ObjectInputStream ois = new ObjectInputStream( + new FileInputStream(fileName)); + EventList e = (EventList) ois.readObject(); + ois.close(); + return e; + } catch (IOException e) { + System.err.println(e); + return null; + } catch (ClassNotFoundException e) { + System.err.println(e); + return null; + } + } // readBinary() + + /* + public void writeMIDI(String fileName) { + 
writeMIDI(fileName, null); + } // writeMIDI() + + public void writeMIDI(String fileName, EventList pedal) { + try { + MidiSystem.write(toMIDI(pedal), 1, new File(fileName)); + } catch (Exception e) { + System.err.println("Error: Unable to write MIDI file " + fileName); + e.printStackTrace(); + } + } // writeMIDI() + + public Sequence toMIDI(EventList pedal) throws InvalidMidiDataException { + final int midiTempo = 1000000; + Sequence s = new Sequence(Sequence.PPQ, 1000); + Track[] tr = new Track[16]; + tr[0] = s.createTrack(); + MetaMessage mm = new MetaMessage(); + byte[] b = new byte[3]; + b[0] = (byte)((midiTempo >> 16) & 0xFF); + b[1] = (byte)((midiTempo >> 8) & 0xFF); + b[2] = (byte)(midiTempo & 0xFF); + mm.setMessage(0x51, b, 3); + tr[0].add(new MidiEvent(mm, 0L)); + for (Event e : l) { // from match or beatTrack file + if (e.midiCommand == 0) // skip beatTrack file + break; + if (tr[e.midiTrack] == null) + tr[e.midiTrack] = s.createTrack(); + //switch (e.midiCommand) + //case ShortMessage.NOTE_ON: + //case ShortMessage.POLY_PRESSURE: + //case ShortMessage.CONTROL_CHANGE: + //case ShortMessage.PROGRAM_CHANGE: + //case ShortMessage.CHANNEL_PRESSURE: + //case ShortMessage.PITCH_BEND: + ShortMessage sm = new ShortMessage(); + sm.setMessage(e.midiCommand, e.midiChannel, + e.midiPitch, e.midiVelocity); + tr[e.midiTrack].add(new MidiEvent(sm, + (long)Math.round(1000 * e.keyDown))); + if (e.midiCommand == ShortMessage.NOTE_ON) { + sm = new ShortMessage(); + sm.setMessage(ShortMessage.NOTE_OFF, e.midiChannel, e.midiPitch, 0); + tr[e.midiTrack].add(new MidiEvent(sm, (long)Math.round(1000 * e.keyUp))); + } + } + if (pedal != null) { // from MIDI file + // if (t.size() > 0) // otherwise beatTrack files leave an empty trk + // t = s.createTrack(); + for (Event e : pedal.l) { + if (tr[e.midiTrack] == null) + tr[e.midiTrack] = s.createTrack(); + ShortMessage sm = new ShortMessage(); + sm.setMessage(e.midiCommand, e.midiChannel, + e.midiPitch, e.midiVelocity); + tr[e.midiTrack].add(new MidiEvent(sm, + (long)Math.round(1000 * e.keyDown))); + if (e.midiCommand == ShortMessage.NOTE_ON) { + sm = new ShortMessage(); + sm.setMessage(ShortMessage.NOTE_OFF, e.midiChannel, + e.midiPitch,e.midiVelocity); + tr[e.midiTrack].add(new MidiEvent(sm, + (long)Math.round(1000 * e.keyUp))); + } + //catch (InvalidMidiDataException exception) {} + } + } + return s; + } // toMIDI() + + public static EventList readMidiFile(String fileName) { + return readMidiFile(fileName, 0); + } // readMidiFile() + + public static EventList readMidiFile(String fileName, int skipTrackFlag) { + EventList list = new EventList(); + Sequence s; + try { + s = MidiSystem.getSequence(new File(fileName)); + } catch (Exception e) { + e.printStackTrace(); + return list; + } + double midiTempo = 500000; + double tempoFactor = midiTempo / s.getResolution() / 1000000.0; + // System.err.println(tempoFactor); + Event[][] noteOns = new Event[128][16]; + Track[] tracks = s.getTracks(); + for (int t = 0; t < tracks.length; t++, skipTrackFlag >>= 1) { + if ((skipTrackFlag & 1) == 1) + continue; + for (int e = 0; e < tracks[t].size(); e++) { + MidiEvent me = tracks[t].get(e); + MidiMessage mm = me.getMessage(); + double time = me.getTick() * tempoFactor; + byte[] mesg = mm.getMessage(); + int channel = mesg[0] & 0x0F; + int command = mesg[0] & 0xF0; + if (command == ShortMessage.NOTE_ON) { + int pitch = mesg[1] & 0x7F; + int velocity = mesg[2] & 0x7F; + if (noteOns[pitch][channel] != null) { + if (velocity == 0) { // NOTE_OFF in disguise :( + 
noteOns[pitch][channel].keyUp = time; + noteOns[pitch][channel].pedalUp = time; + noteOns[pitch][channel] = null; + } else + System.err.println("Double note on: n=" + pitch + + " c=" + channel + + " t1=" + noteOns[pitch][channel] + + " t2=" + time); + } else { + Event n = new Event(time, 0, 0, pitch, velocity, -1, -1, + 0, ShortMessage.NOTE_ON, channel, t); + noteOns[pitch][channel] = n; + list.add(n); + } + } else if (command == ShortMessage.NOTE_OFF) { + int pitch = mesg[1] & 0x7F; + noteOns[pitch][channel].keyUp = time; + noteOns[pitch][channel].pedalUp = time; + noteOns[pitch][channel] = null; + } else if (command == 0xF0) { + if ((channel == 0x0F) && (mesg[1] == 0x51)) { + midiTempo = (mesg[5] & 0xFF) | + ((mesg[4] & 0xFF) << 8) | + ((mesg[3] & 0xFF) << 16); + tempoFactor = midiTempo / s.getResolution() / 1000000.0; + // System.err.println("Info: Tempo change: " + midiTempo + + // " tf=" + tempoFactor); + } + } else if (mesg.length > 3) { + System.err.println("midi message too long: " + mesg.length); + System.err.println("\tFirst byte: " + mesg[0]); + } else { + int b0 = mesg[0] & 0xFF; + int b1 = -1; + int b2 = -1; + if (mesg.length > 1) + b1 = mesg[1] & 0xFF; + if (mesg.length > 2) + b2 = mesg[2] & 0xFF; + list.add(new Event(time, time, -1, b1, b2, -1, -1, 0, + b0 & 0xF0, b0 & 0x0F, t)); + } + } + } + for (int pitch = 0; pitch < 128; pitch++) + for (int channel = 0; channel < 16; channel++) + if (noteOns[pitch][channel] != null) + System.err.println("Missing note off: n=" + + noteOns[pitch][channel].midiPitch + " t=" + + noteOns[pitch][channel].keyDown); + return list; + } // readMidiFile() +*/ + public void print() { + for (Iterator i = l.iterator(); i.hasNext(); ) + i.next().print(flags); + } // print() + + public static void setTimingCorrection(double corr) { + timingCorrection = corr >= 0; + timingDisplacement = corr; + } // setTimingCorrection() + + + + + +} // class EventList diff --git a/app/src/main/java/be/tarsos/dsp/beatroot/Induction.java b/app/src/main/java/be/tarsos/dsp/beatroot/Induction.java new file mode 100644 index 0000000..85f6c41 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/beatroot/Induction.java @@ -0,0 +1,355 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +/* BeatRoot: An interactive beat tracking system + Copyright (C) 2001, 2006 by Simon Dixon + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License along + with this program (the file gpl.txt); if not, download it from + http://www.gnu.org/licenses/gpl.txt or write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +*/ + +package be.tarsos.dsp.beatroot; + +import java.util.ListIterator; + +/** Performs tempo induction by finding clusters of similar + * inter-onset intervals (IOIs), ranking them according to the number + * of intervals and relationships between them, and returning a set + * of tempo hypotheses for initialising the beat tracking agents. + */ +public class Induction { + + /** The maximum difference in IOIs which are in the same cluster */ + public static double clusterWidth = 0.025; + + /** The minimum IOI for inclusion in a cluster */ + public static double minIOI = 0.070; + + /** The maximum IOI for inclusion in a cluster */ + public static double maxIOI = 2.500; + + /** The minimum inter-beat interval (IBI), i.e. the maximum tempo + * hypothesis that can be returned. + * 0.30 seconds == 200 BPM + * 0.25 seconds == 240 BPM + */ + public static double minIBI = 0.3; + + /** The maximum inter-beat interval (IBI), i.e. the minimum tempo + * hypothesis that can be returned. + * 1.00 seconds == 60 BPM + * 0.75 seconds == 80 BPM + * 0.60 seconds == 100 BPM + */ + public static double maxIBI = 1.0; // 60BPM // was 0.75 => 80 + + /** The maximum number of tempo hypotheses to return */ + public static int topN = 10; + + /** Flag to enable debugging output */ + public static boolean debug = false; + + /** Performs tempo induction (see JNMR 2001 paper by Simon Dixon for details). + * @param events The onsets (or other events) from which the tempo is induced + * @return A list of beat tracking agents, where each is initialised with one + * of the top tempo hypotheses but no beats + */ + public static AgentList beatInduction(EventList events) { + int i, j, b, bestCount; + boolean submult; + int intervals = 0; // number of interval clusters + int[] bestn = new int[topN];// count of high-scoring clusters + double ratio, err; + int degree; + int maxClusterCount = (int) Math.ceil((maxIOI - minIOI) / clusterWidth); + double[] clusterMean = new double[maxClusterCount]; + int[] clusterSize = new int[maxClusterCount]; + int[] clusterScore = new int[maxClusterCount]; + + ListIterator ptr1, ptr2; + Event e1,e2; + ptr1 = events.listIterator(); + while (ptr1.hasNext()) { + e1 = ptr1.next(); + ptr2 = events.listIterator(); + e2 = ptr2.next(); + while (e2 != e1) + e2 = ptr2.next(); + while (ptr2.hasNext()) { + e2 = ptr2.next(); + double ioi = e2.keyDown - e1.keyDown; + if (ioi < minIOI) // skip short intervals + continue; + if (ioi > maxIOI) // ioi too long + break; + for (b = 0; b < intervals; b++) // assign to nearest cluster + if (Math.abs(clusterMean[b] - ioi) < clusterWidth) { + if ((b < intervals - 1) && ( + Math.abs(clusterMean[b+1] - ioi) < + Math.abs(clusterMean[b] - ioi))) + b++; // next cluster is closer + clusterMean[b] = (clusterMean[b] * clusterSize[b] +ioi)/ + (clusterSize[b] + 1); + clusterSize[b]++; + break; + } + if (b == intervals) { // no suitable cluster; create new one + if (intervals == maxClusterCount) { + System.err.println("Warning: Too many clusters"); + continue; // ignore this IOI + } + intervals++; + for ( ; (b>0) && (clusterMean[b-1] > ioi); b--) { + clusterMean[b] = clusterMean[b-1]; + clusterSize[b] = clusterSize[b-1]; + } + clusterMean[b] = ioi; + clusterSize[b] = 1; + } + } + } + if (debug) { 
// output IOI histogram in Matlab format + System.out.println("Inter-onset interval histogram:\n" + + "StartMatlabCode\n" + + "ioi = ["); + for (b = 0; b < intervals; b++) + System.out.printf("%4d %7.3f %7d\n", + b, clusterMean[b], clusterSize[b]); + System.out.println("]; ioiclusters(ioi, name);\nEndMatlabCode\n"); + } + for (b = 0; b < intervals; b++) // merge similar intervals + // TODO: they are now in order, so don't need the 2nd loop + // TODO: check BOTH sides before averaging or upper gps don't work + for (i = b+1; i < intervals; i++) + if (Math.abs(clusterMean[b] - clusterMean[i]) < clusterWidth) { + clusterMean[b] = (clusterMean[b] * clusterSize[b] + + clusterMean[i] * clusterSize[i]) / + (clusterSize[b] + clusterSize[i]); + clusterSize[b] = clusterSize[b] + clusterSize[i]; + --intervals; + for (j = i+1; j <= intervals; j++) { + clusterMean[j-1] = clusterMean[j]; + clusterSize[j-1] = clusterSize[j]; + } + } + if (intervals == 0) + return new AgentList(); + for (b = 0; b < intervals; b++) + clusterScore[b] = 10 * clusterSize[b]; + bestn[0] = 0; + bestCount = 1; + for (b = 0; b < intervals; b++) + for (i = 0; i <= bestCount; i++) + if ((i < topN) && ((i == bestCount) || + (clusterScore[b] > clusterScore[bestn[i]]))){ + if (bestCount < topN) + bestCount++; + for (j = bestCount - 1; j > i; j--) + bestn[j] = bestn[j-1]; + bestn[i] = b; + break; + } + if (debug) { + System.out.println("Best " + bestCount + " clusters (before):"); + for (b = 0; b < bestCount; b++) + System.out.printf("%5.3f : %5d\n", clusterMean[bestn[b]], + clusterScore[bestn[b]]); + } + for (b = 0; b < intervals; b++) // score intervals + for (i = b+1; i < intervals; i++) { + ratio = clusterMean[b] / clusterMean[i]; + submult = ratio < 1; + if (submult) + degree = (int) Math.round(1/ratio); + else + degree = (int) Math.round(ratio); + if ((degree >= 2) && (degree <= 8)) { + if (submult) + err = Math.abs(clusterMean[b]*degree - clusterMean[i]); + else + err = Math.abs(clusterMean[b] - clusterMean[i]*degree); + if (err < (submult? 
clusterWidth : clusterWidth * degree)) { + if (degree >= 5) + degree = 1; + else + degree = 6 - degree; + clusterScore[b] += degree * clusterSize[i]; + clusterScore[i] += degree * clusterSize[b]; + } + } + } + if (debug) { + System.out.println("Best " + bestCount + " clusters (after):"); + for (b = 0; (b < bestCount); b++) + System.out.printf("%5.3f : %5d\n", clusterMean[bestn[b]], + clusterScore[bestn[b]]); + } + if (debug) { + System.out.println("Inter-onset interval histogram 2:"); + for (b = 0; b < intervals; b++) + System.out.printf("%3d: %5.3f : %3d (score: %5d)\n", + b, clusterMean[b], clusterSize[b], clusterScore[b]); + } + + AgentList a = new AgentList(); + for (int index = 0; index < bestCount; index++) { + b = bestn[index]; + // Adjust it, using the size of super- and sub-intervals + double newSum = clusterMean[b] * clusterScore[b]; + //int newCount = clusterSize[b]; + int newWeight = clusterScore[b]; + for (i = 0; i < intervals; i++) { + if (i == b) + continue; + ratio = clusterMean[b] / clusterMean[i]; + if (ratio < 1) { + degree = (int) Math.round(1 / ratio); + if ((degree >= 2) && (degree <= 8)) { + err = Math.abs(clusterMean[b]*degree - clusterMean[i]); + if (err < clusterWidth) { + newSum += clusterMean[i] / degree * clusterScore[i]; + //newCount += clusterSize[i]; + newWeight += clusterScore[i]; + } + } + } else { + degree = (int) Math.round(ratio); + if ((degree >= 2) && (degree <= 8)) { + err = Math.abs(clusterMean[b] - degree*clusterMean[i]); + if (err < clusterWidth * degree) { + newSum += clusterMean[i] * degree * clusterScore[i]; + //newCount += clusterSize[i]; + newWeight += clusterScore[i]; + } + } + } + } + double beat = newSum / newWeight; + // Scale within range ... hope the grouping isn't ternary :( + while (beat < minIBI) // Maximum speed + beat *= 2.0; + while (beat > maxIBI) // Minimum speed + beat /= 2.0; + if (beat >= minIBI) { + a.add(new Agent(beat)); + if (debug) + System.out.printf(" %5.3f", beat); + } + } + if (debug) + System.out.println(" IBI"); + return a; + } // beatInduction() + + /** For variable cluster widths in newInduction(). + * @param low The lowest IOI allowed in the cluster + * @return The highest IOI allowed in the cluster + */ + protected static int top(int low) { + return low + 25; // low/10; + } // top() + + /** An alternative (incomplete) tempo induction method (not used). + * Uses integer (millisecond) resolution. 
+ * @param events The events on which tempo induction is performed + */ + public static void newInduction(EventList events) { + final int MAX_MS = 2500; + int[] count = new int[MAX_MS]; + for (int i=0; i < MAX_MS; i++) + count[i] = 0; + ListIterator ptr1, ptr2; + Event e1,e2; + ptr1 = events.listIterator(); + while (ptr1.hasNext()) { + e1 = ptr1.next(); + ptr2 = events.listIterator(); + e2 = ptr2.next(); + while (e2 != e1) + e2 = ptr2.next(); + while (ptr2.hasNext()) { + e2 = ptr2.next(); + int diff = (int) Math.round((e1.keyDown - e2.keyDown) * 1000); + if (diff < MAX_MS) + count[diff]++; + else + break; + } + } + int clnum; + final int MAX_CL = 10; + int[] cluster = new int[MAX_CL]; + int[] csize = new int[MAX_CL]; + for (clnum = 0; clnum < MAX_CL; clnum++) { + int sum = 0; + int max = 0; + int maxp = 0; + int hi = 70; + int lo = hi; + while (hi < MAX_MS) { + if (hi >= top(lo)) + sum -= count[lo++]; + else { + sum += count[hi++]; + if (sum > max) { + max = sum; + maxp = lo; + } + } + } + if (max == 0) + break; + hi = top(maxp); + if (hi > MAX_MS) + hi = MAX_MS; + int cnt = sum = 0; + for (lo = maxp; lo < hi; lo++) { + sum += lo * count[lo]; + cnt += count[lo]; + count[lo] = 0; + } + if (cnt != max) + System.err.println("Rounding error in newInduction"); + cluster[clnum] = sum / cnt; + csize[clnum] = cnt; + System.out.printf(" %5.3f", sum / 1000.0 / cnt); + //System.out.println("Cluster " + (clnum+1) ": " + (sum/cnt) + + // "ms (" + cnt + " intervals)"); + } + System.out.println(" IBI"); + // System.out.println("END OF NEW_INDUCTION"); + } // newInduction() + +} // class Induction diff --git a/app/src/main/java/be/tarsos/dsp/beatroot/Peaks.java b/app/src/main/java/be/tarsos/dsp/beatroot/Peaks.java new file mode 100644 index 0000000..150e4af --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/beatroot/Peaks.java @@ -0,0 +1,252 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +/* + Copyright (C) 2001, 2006 by Simon Dixon + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program (the file gpl.txt); if not, download it from + http://www.gnu.org/licenses/gpl.txt or write to the + Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+*/ + +package be.tarsos.dsp.beatroot; + +import java.util.LinkedList; + +public class Peaks { + + public static boolean debug = false; + public static int pre = 3; + public static int post = 1; + + /** + * General peak picking method for finding n local maxima in an array + * @param data input data + * @param peaks list of peak indexes + * @param width minimum distance between peaks + * @return The number of peaks found + */ + public static int findPeaks(double[] data, int[] peaks, int width) { + int peakCount = 0; + int maxp = 0; + int mid = 0; + int end = data.length; + while (mid < end) { + int i = mid - width; + if (i < 0) + i = 0; + int stop = mid + width + 1; + if (stop > data.length) + stop = data.length; + maxp = i; + for (i++; i < stop; i++) + if (data[i] > data[maxp]) + maxp = i; + if (maxp == mid) { + int j; + for (j = peakCount; j > 0; j--) { + if (data[maxp] <= data[peaks[j-1]]) + break; + else if (j < peaks.length) + peaks[j] = peaks[j-1]; + } + if (j != peaks.length) + peaks[j] = maxp; + if (peakCount != peaks.length) + peakCount++; + } + mid++; + } + return peakCount; + } // findPeaks() + + /** General peak picking method for finding local maxima in an array + * @param data input data + * @param width minimum distance between peaks + * @param threshold minimum value of peaks + * @return list of peak indexes + */ + public static LinkedList findPeaks(double[] data, int width, + double threshold) { + return findPeaks(data, width, threshold, 0, false); + } // findPeaks() + + /** General peak picking method for finding local maxima in an array + * @param data input data + * @param width minimum distance between peaks + * @param threshold minimum value of peaks + * @param decayRate how quickly previous peaks are forgotten + * @param isRelative minimum value of peaks is relative to local average + * @return list of peak indexes + */ + public static LinkedList findPeaks(double[] data, int width, + double threshold, double decayRate, boolean isRelative) { + LinkedList peaks = new LinkedList(); + int maxp = 0; + int mid = 0; + int end = data.length; + double av = data[0]; + while (mid < end) { + av = decayRate * av + (1 - decayRate) * data[mid]; + if (av < data[mid]) + av = data[mid]; + int i = mid - width; + if (i < 0) + i = 0; + int stop = mid + width + 1; + if (stop > data.length) + stop = data.length; + maxp = i; + for (i++; i < stop; i++) + if (data[i] > data[maxp]) + maxp = i; + if (maxp == mid) { + if (overThreshold(data, maxp, width, threshold, isRelative,av)){ + if (debug) + System.out.println(" peak"); + peaks.add(Integer.valueOf(maxp)); + } else if (debug) + System.out.println(); + } + mid++; + } + return peaks; + } // findPeaks() + + public static double expDecayWithHold(double av, double decayRate, + double[] data, int start, int stop) { + while (start < stop) { + av = decayRate * av + (1 - decayRate) * data[start]; + if (av < data[start]) + av = data[start]; + start++; + } + return av; + } // expDecayWithHold() + + public static boolean overThreshold(double[] data, int index, int width, + double threshold, boolean isRelative, + double av) { + if (debug) + System.out.printf("%4d : %6.3f Av1: %6.3f ", + index, data[index], av); + if (data[index] < av) + return false; + if (isRelative) { + int iStart = index - pre * width; + if (iStart < 0) + iStart = 0; + int iStop = index + post * width; + if (iStop > data.length) + iStop = data.length; + double sum = 0; + int count = iStop - iStart; + while (iStart < iStop) + sum += data[iStart++]; + if (debug) + System.out.printf(" 
%6.3f %6.3f ", sum / count, + data[index] - sum / count - threshold); + return (data[index] > sum / count + threshold); + } else + return (data[index] > threshold); + } // overThreshold() + + public static void normalise(double[] data) { + double sx = 0; + double sxx = 0; + for (int i = 0; i < data.length; i++) { + sx += data[i]; + sxx += data[i] * data[i]; + } + double mean = sx / data.length; + double sd = Math.sqrt((sxx - sx * mean) / data.length); + if (sd == 0) + sd = 1; // all data[i] == mean -> 0; avoids div by 0 + for (int i = 0; i < data.length; i++) { + data[i] = (data[i] - mean) / sd; + } + } // normalise() + + /** Uses an n-point linear regression to estimate the slope of data. + * @param data input data + * @param hop spacing of data points + * @param n length of linear regression + * @param slope output data + */ + public static void getSlope(double[] data, double hop, int n, + double[] slope) { + int i = 0, j = 0; + double t; + double sx = 0, sxx = 0, sy = 0, sxy = 0; + for ( ; i < n; i++) { + t = i * hop; + sx += t; + sxx += t * t; + sy += data[i]; + sxy += t * data[i]; + } + double delta = n * sxx - sx * sx; + for ( ; j < n / 2; j++) + slope[j] = (n * sxy - sx * sy) / delta; + for ( ; j < data.length - (n + 1) / 2; j++, i++) { + slope[j] = (n * sxy - sx * sy) / delta; + sy += data[i] - data[i - n]; + sxy += hop * (n * data[i] - sy); + } + for ( ; j < data.length; j++) + slope[j] = (n * sxy - sx * sy) / delta; + } // getSlope() + + public static double min(double[] arr) { return arr[imin(arr)]; } + + public static double max(double[] arr) { return arr[imax(arr)]; } + + public static int imin(double[] arr) { + int i = 0; + for (int j = 1; j < arr.length; j++) + if (arr[j] < arr[i]) + i = j; + return i; + } // imin() + + public static int imax(double[] arr) { + int i = 0; + for (int j = 1; j < arr.length; j++) + if (arr[j] > arr[i]) + i = j; + return i; + } // imax() + +} // class Peaks diff --git a/app/src/main/java/be/tarsos/dsp/beatroot/package-info.java b/app/src/main/java/be/tarsos/dsp/beatroot/package-info.java new file mode 100644 index 0000000..1f92db2 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/beatroot/package-info.java @@ -0,0 +1,28 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +/** + * Contains classes imported from the Beatroot project by Simon Dixon. 
+ */ +package be.tarsos.dsp.beatroot; diff --git a/app/src/main/java/be/tarsos/dsp/effects/DelayEffect.java b/app/src/main/java/be/tarsos/dsp/effects/DelayEffect.java new file mode 100644 index 0000000..1e6fd1a --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/effects/DelayEffect.java @@ -0,0 +1,119 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +package be.tarsos.dsp.effects; + +import be.tarsos.dsp.AudioEvent; +import be.tarsos.dsp.AudioProcessor; + + +/** + *
<p>
+ * Adds an echo effect to the signal. + *
</p>
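+ * <p>
+ * A minimal usage sketch (illustrative; assumes an AudioDispatcher named
+ * {@code dispatcher} has already been set up for 44.1 kHz audio): a 200 ms
+ * echo that decays to half its level on each repeat.
+ * </p>
+ * <pre>
+ * DelayEffect delay = new DelayEffect(0.2, 0.5, 44100);
+ * dispatcher.addAudioProcessor(delay);
+ * // both parameters can be changed while audio is being processed
+ * delay.setEchoLength(0.35);
+ * delay.setDecay(0.3);
+ * </pre>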
+ * + * @author Joren Six + */ +public class DelayEffect implements AudioProcessor { + + private final double sampleRate; + private float[] echoBuffer;//in seconds + private int position; + private float decay; + + private double newEchoLength; + + /** + * @param echoLength in seconds + * @param sampleRate the sample rate in Hz. + * @param decay The decay of the echo, a value between 0 and 1. 1 meaning no decay, 0 means immediate decay (not echo effect). + */ + public DelayEffect(double echoLength,double decay,double sampleRate) { + this.sampleRate = sampleRate; + setDecay(decay); + setEchoLength(echoLength); + applyNewEchoLength(); + } + + /** + * @param newEchoLength A new echo buffer length in seconds. + */ + public void setEchoLength(double newEchoLength){ + this.newEchoLength = newEchoLength; + } + + private void applyNewEchoLength(){ + if(newEchoLength != -1){ + + //create a new buffer with the information of the previous buffer + float[] newEchoBuffer = new float[(int) (sampleRate * newEchoLength)]; + if(echoBuffer != null){ + for(int i = 0 ; i < newEchoBuffer.length; i++){ + if(position >= echoBuffer.length){ + position = 0; + } + newEchoBuffer[i] = echoBuffer[position]; + position++; + } + } + this.echoBuffer = newEchoBuffer; + newEchoLength = -1; + } + } + + /** + * A decay, should be a value between zero and one. + * @param newDecay the new decay (preferably between zero and one). + */ + public void setDecay(double newDecay){ + this.decay = (float) newDecay; + } + + @Override + public boolean process(AudioEvent audioEvent) { + float[] audioFloatBuffer = audioEvent.getFloatBuffer(); + int overlap = audioEvent.getOverlap(); + + for(int i = overlap ; i < audioFloatBuffer.length ; i++){ + if(position >= echoBuffer.length){ + position = 0; + } + + //output is the input added with the decayed echo + audioFloatBuffer[i] = audioFloatBuffer[i] + echoBuffer[position] * decay; + //store the sample in the buffer; + echoBuffer[position] = audioFloatBuffer[i]; + + position++; + } + + applyNewEchoLength(); + + return true; + } + + @Override + public void processingFinished() { + } +} diff --git a/app/src/main/java/be/tarsos/dsp/effects/FlangerEffect.java b/app/src/main/java/be/tarsos/dsp/effects/FlangerEffect.java new file mode 100644 index 0000000..ca6b2ad --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/effects/FlangerEffect.java @@ -0,0 +1,199 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. 
+* +*/ + +/* + * _______ _____ _____ _____ + * |__ __| | __ \ / ____| __ \ + * | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | + * | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ + * | | (_| | | \__ \ (_) \__ \ |__| |____) | | + * |_|\__,_|_| |___/\___/|___/_____/|_____/|_| + * + * ----------------------------------------------------------- + * + * TarsosDSP is developed by Joren Six at + * The School of Arts, + * University College Ghent, + * Hoogpoort 64, 9000 Ghent - Belgium + * + * ----------------------------------------------------------- + * + * Info: http://tarsos.0110.be/tag/TarsosDSP + * Github: https://github.com/JorenSix/TarsosDSP + * Releases: http://tarsos.0110.be/releases/TarsosDSP/ + * + * TarsosDSP includes modified source code by various authors, + * for credits and info, see README. + * + */ + +package be.tarsos.dsp.effects; + +import be.tarsos.dsp.AudioEvent; +import be.tarsos.dsp.AudioProcessor; + +/** + *
<p>
+ * Adds a flanger effect to a signal. The implementation is done with a delay + * buffer and an LFO in the form of a sine wave. It is probably the most + * straightforward flanger implementation possible. + *
</p>
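+ * <p>
+ * A minimal usage sketch (illustrative; assumes an AudioDispatcher named
+ * {@code dispatcher} has already been set up for 44.1 kHz audio): up to 3 ms
+ * of delay, a 50/50 wet/dry mix and a 0.2 Hz LFO.
+ * </p>
+ * <pre>
+ * FlangerEffect flanger = new FlangerEffect(0.003, 0.5, 44100, 0.2);
+ * dispatcher.addAudioProcessor(flanger);
+ * </pre>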
+ * + * @author Joren Six + */ +public class FlangerEffect implements AudioProcessor { + + /** + * A simple delay buffer, it holds a number of samples determined by the + * maxFlangerLength and the sample rate. + */ + private float[] flangerBuffer; + + /** + * The position in the delay buffer to store the current sample. + */ + private int writePosition; + + /** + * Determines the factor of original signal that remains in the final mix. + * Dry should always equal 1-wet). + */ + private float dry; + /** + * Determines the factor of flanged signal that is mixed in the final mix. + * Wet should always equal 1-dry. + */ + private float wet; + /** + * The frequency for the LFO (sine). + */ + private double lfoFrequency; + + /** + * The sample rate is neede to calculate the length of the delay buffer. + */ + private final double sampleRate; + + /** + * @param maxFlangerLength + * in seconds + * @param wet + * The 'wetness' of the flanging effect. A value between 0 and 1. + * Zero meaning no flanging effect in the resulting signal, one + * means total flanging effect and no original signal left. The + * dryness of the signal is determined by dry = "1-wet". + * @param sampleRate + * the sample rate in Hz. + * @param lfoFrequency + * in Hertz + */ + public FlangerEffect(double maxFlangerLength, double wet, + double sampleRate, double lfoFrequency) { + this.flangerBuffer = new float[(int) (sampleRate * maxFlangerLength)]; + this.sampleRate = sampleRate; + this.lfoFrequency = lfoFrequency; + this.wet = (float) wet; + this.dry = (float) (1 - wet); + } + + @Override + public boolean process(AudioEvent audioEvent) { + float[] audioFloatBuffer = audioEvent.getFloatBuffer(); + int overlap = audioEvent.getOverlap(); + + // Divide f by two, to counter rectifier below, which effectively + // doubles the frequency + double twoPIf = 2 * Math.PI * lfoFrequency / 2.0; + double time = audioEvent.getTimeStamp(); //in seconds + double timeStep = 1.0 / sampleRate; // also in seconds + + for (int i = overlap; i < audioFloatBuffer.length; i++) { + + // Calculate the LFO delay value with a sine wave: + //fix by hans bickel + double lfoValue = (flangerBuffer.length - 1) * Math.sin(twoPIf * time); + // add a time step, each iteration + time += timeStep; + + // Make the delay a positive integer, sine rectifier + int delay = (int) (Math.round(Math.abs(lfoValue))); + + // store the current sample in the delay buffer; + if (writePosition >= flangerBuffer.length) { + writePosition = 0; + } + flangerBuffer[writePosition] = audioFloatBuffer[i]; + + // find out the position to read the delayed sample: + int readPosition = writePosition - delay; + if (readPosition < 0) { + readPosition += flangerBuffer.length; + } + + //increment the write-position + writePosition++; + + // Output is the input summed with the value at the delayed flanger + // buffer + audioFloatBuffer[i] = dry * audioFloatBuffer[i] + wet * flangerBuffer[readPosition]; + } + return true; + } + + @Override + public void processingFinished() { + } + + /** + * Set the new length of the delay LineWavelet. + * + * @param flangerLength + * The new length of the delay LineWavelet, in seconds. + */ + public void setFlangerLength(double flangerLength) { + flangerBuffer = new float[(int) (sampleRate * flangerLength)]; + } + + /** + * Sets the frequency of the LFO (sine wave), in Hertz. + * + * @param lfoFrequency + * The new LFO frequency in Hertz. 
+ */ + public void setLFOFrequency(double lfoFrequency) { + this.lfoFrequency = lfoFrequency; + } + + /** + * Sets the wetness and dryness of the effect. Should be a value between + * zero and one (inclusive), the dryness is determined by 1-wet. + * + * @param wet + * A value between zero and one (inclusive) that determines the + * wet and dryness of the resulting mix. + */ + public void setWet(double wet) { + this.wet = (float) wet; + this.dry = (float) (1 - wet); + } +} diff --git a/app/src/main/java/be/tarsos/dsp/effects/package-info.java b/app/src/main/java/be/tarsos/dsp/effects/package-info.java new file mode 100644 index 0000000..b3e80fc --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/effects/package-info.java @@ -0,0 +1,28 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +/** + * Contains audio effects. + */ +package be.tarsos.dsp.effects; diff --git a/app/src/main/java/be/tarsos/dsp/filters/BandPass.java b/app/src/main/java/be/tarsos/dsp/filters/BandPass.java new file mode 100644 index 0000000..10b793f --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/filters/BandPass.java @@ -0,0 +1,104 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +/* + * Copyright (c) 2007 - 2008 by Damien Di Fede + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Library General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +package be.tarsos.dsp.filters; + +/** + * A band pass filter is a filter that filters out all frequencies except for + * those in a band centered on the current frequency of the filter. 
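+ * <p>
+ * A minimal usage sketch (illustrative; assumes an AudioDispatcher named
+ * {@code dispatcher} has already been set up for 44.1 kHz audio): keep a
+ * 200 Hz wide band centred on 1 kHz.
+ * </p>
+ * <pre>
+ * BandPass bandPass = new BandPass(1000, 200, 44100);
+ * dispatcher.addAudioProcessor(bandPass);
+ * bandPass.setFrequency(1500); // the centre frequency can be retuned later
+ * </pre>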
+ * + * @author Damien Di Fede + * + */ +public class BandPass extends IIRFilter +{ + private float bw; + + /** + * Constructs a band pass filter with the requested center frequency, + * bandwidth and sample rate. + * + * @param freq + * the center frequency of the band to pass (in Hz) + * @param bandWidth + * the width of the band to pass (in Hz) + * @param sampleRate + * the sample rate of audio that will be filtered by this filter + */ + public BandPass(float freq, float bandWidth, float sampleRate) + { + super(freq, sampleRate); + setBandWidth(bandWidth); + } + + /** + * Sets the band width of the filter. Doing this will cause the coefficients + * to be recalculated. + * + * @param bandWidth + * the band width (in Hz) + */ + public void setBandWidth(float bandWidth) + { + bw = bandWidth / getSampleRate(); + calcCoeff(); + } + + /** + * Returns the band width of this filter. + * + * @return the band width (in Hz) + */ + public float getBandWidth() + { + return bw * getSampleRate(); + } + + protected void calcCoeff() + { + float R = 1 - 3 * bw; + float fracFreq = getFrequency() / getSampleRate(); + float T = 2 * (float) Math.cos(2 * Math.PI * fracFreq); + float K = (1 - R * T + R * R) / (2 - T); + a = new float[] { 1 - K, (K - R) * T, R * R - K }; + b = new float[] { R * T, -R * R }; + } +} diff --git a/app/src/main/java/be/tarsos/dsp/filters/HighPass.java b/app/src/main/java/be/tarsos/dsp/filters/HighPass.java new file mode 100644 index 0000000..eb2bdc7 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/filters/HighPass.java @@ -0,0 +1,62 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +/* + * Copyright (c) 2007 - 2008 by Damien Di Fede + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Library General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +package be.tarsos.dsp.filters; + +/** + * A High pass IIR filter. Frequency defines the cutoff. 
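+ * <p>
+ * A minimal usage sketch (illustrative; assumes an AudioDispatcher named
+ * {@code dispatcher} has already been set up for 44.1 kHz audio): attenuate
+ * rumble below roughly 80 Hz.
+ * </p>
+ * <pre>
+ * dispatcher.addAudioProcessor(new HighPass(80, 44100));
+ * </pre>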
+ * @author Joren Six + */ +public class HighPass extends IIRFilter{ + + public HighPass(float freq, float sampleRate) { + super(freq, sampleRate); + } + + protected void calcCoeff() + { + float fracFreq = getFrequency()/getSampleRate(); + float x = (float)Math.exp(-2 * Math.PI * fracFreq); + a = new float[] { (1+x)/2, -(1+x)/2 }; + b = new float[] { x }; + } + +} diff --git a/app/src/main/java/be/tarsos/dsp/filters/IIRFilter.java b/app/src/main/java/be/tarsos/dsp/filters/IIRFilter.java new file mode 100644 index 0000000..eb8b24d --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/filters/IIRFilter.java @@ -0,0 +1,163 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +/* + * Copyright (c) 2007 - 2008 by Damien Di Fede + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Library General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +package be.tarsos.dsp.filters; + +import be.tarsos.dsp.AudioEvent; +import be.tarsos.dsp.AudioProcessor; + +/** + * An Infinite Impulse Response, or IIR, filter is a filter that uses a set of + * coefficients and previous filtered values to filter a stream of audio. It is + * an efficient way to do digital filtering. IIRFilter is a general IIRFilter + * that simply applies the filter designated by the filter coefficients so that + * sub-classes only have to dictate what the values of those coefficients are by + * defining the calcCoeff() function. When filling the coefficient + * arrays, be aware that b[0] corresponds to + * b1. + * + * @author Damien Di Fede + * @author Joren Six + * + */ +public abstract class IIRFilter implements AudioProcessor { + + /** The b coefficients. */ + protected float[] b; + + /** The a coefficients. */ + protected float[] a; + + /** + * The input values to the left of the output value currently being + * calculated. + */ + protected float[] in; + + /** The previous output values. */ + protected float[] out; + + private float frequency; + + private final float sampleRate; + + + /** + * Constructs an IIRFilter with the given cutoff frequency that will be used + * to filter audio recorded at sampleRate. 
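+ * <p>
+ * Illustrative sketch of a custom filter (a hypothetical subclass, shown only
+ * to make the coefficient convention concrete): a one-pole low pass that
+ * fills the {@code a} and {@code b} arrays in {@code calcCoeff()}.
+ * </p>
+ * <pre>
+ * public class OnePoleLowPass extends IIRFilter {
+ *     public OnePoleLowPass(float freq, float sampleRate) {
+ *         super(freq, sampleRate);
+ *     }
+ *     protected void calcCoeff() {
+ *         float x = (float) Math.exp(-2 * Math.PI * getFrequency() / getSampleRate());
+ *         a = new float[] { 1 - x }; // weight of the newest input sample
+ *         b = new float[] { x };     // weight of the previous output sample
+ *     }
+ * }
+ * </pre>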
+ * + * @param freq + * the cutoff frequency + * @param sampleRate + * the sample rate of audio to be filtered + */ + public IIRFilter(float freq, float sampleRate) { + this.sampleRate = sampleRate; + this.frequency = freq; + calcCoeff(); + in = new float[a.length]; + out = new float[b.length]; + } + + public void setFrequency(float freq){ + this.frequency = freq; + calcCoeff(); + } + + /** + * Returns the cutoff frequency (in Hz). + * + * @return the current cutoff frequency (in Hz). + */ + protected final float getFrequency() { + return frequency; + } + + protected final float getSampleRate(){ + return sampleRate; + } + + /** + * Calculates the coefficients of the filter using the current cutoff + * frequency. To make your own IIRFilters, you must extend IIRFilter and + * implement this function. The frequency is expressed as a fraction of the + * sample rate. When filling the coefficient arrays, be aware that + * b[0] corresponds to the coefficient + * b1. + * + */ + protected abstract void calcCoeff() ; + + + @Override + public boolean process(AudioEvent audioEvent) { + float[] audioFloatBuffer = audioEvent.getFloatBuffer(); + + for (int i = audioEvent.getOverlap(); i < audioFloatBuffer.length; i++) { + //shift the in array + System.arraycopy(in, 0, in, 1, in.length - 1); + in[0] = audioFloatBuffer[i]; + + //calculate y based on a and b coefficients + //and in and out. + float y = 0; + for(int j = 0 ; j < a.length ; j++){ + y += a[j] * in[j]; + } + for(int j = 0 ; j < b.length ; j++){ + y += b[j] * out[j]; + } + //shift the out array + System.arraycopy(out, 0, out, 1, out.length - 1); + out[0] = y; + + audioFloatBuffer[i] = y; + } + return true; + } + + + @Override + public void processingFinished() { + + } +} diff --git a/app/src/main/java/be/tarsos/dsp/filters/LowPassFS.java b/app/src/main/java/be/tarsos/dsp/filters/LowPassFS.java new file mode 100644 index 0000000..be4f83f --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/filters/LowPassFS.java @@ -0,0 +1,64 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +/* + * Copyright (c) 2007 - 2008 by Damien Di Fede + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Library General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
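The process() loop in IIRFilter above is a direct-form difference equation: each output sample is a weighted sum of recent inputs (the a coefficients) and recent outputs (the b coefficients). For the single-pole case used by LowPassSP later in this patch it reduces to y[n] = (1 - x) * in[n] + x * y[n - 1]. A standalone sketch of that recurrence, as a hypothetical helper that is not part of the patch:

// Scalar restatement of IIRFilter.process() for one a and one b coefficient,
// i.e. the LowPassSP case: y[n] = (1 - x) * input[n] + x * y[n - 1].
static float[] singlePoleLowPass(float[] input, float cutoffHz, float sampleRate) {
    float x = (float) Math.exp(-2 * Math.PI * cutoffHz / sampleRate);
    float[] output = new float[input.length];
    float previousOutput = 0f;
    for (int n = 0; n < input.length; n++) {
        previousOutput = (1 - x) * input[n] + x * previousOutput;
        output[n] = previousOutput;
    }
    return output;
}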
+ */ +package be.tarsos.dsp.filters; + +/** + * Four stage low pass filter. + * + */ +public class LowPassFS extends IIRFilter{ + + public LowPassFS(float freq, float sampleRate) { + //minimum frequency is 60Hz! + super(freq>60?freq:60, sampleRate); + } + + @Override + protected void calcCoeff() { + float freqFrac = getFrequency() / getSampleRate(); + float x = (float) Math.exp(-14.445 * freqFrac); + a = new float[] { (float) Math.pow(1 - x, 4) }; + b = new float[] { 4 * x, -6 * x * x, 4 * x * x * x, -x * x * x * x }; + } + + +} diff --git a/app/src/main/java/be/tarsos/dsp/filters/LowPassSP.java b/app/src/main/java/be/tarsos/dsp/filters/LowPassSP.java new file mode 100644 index 0000000..95acf99 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/filters/LowPassSP.java @@ -0,0 +1,62 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +/* + * Copyright (c) 2007 - 2008 by Damien Di Fede + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Library General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +package be.tarsos.dsp.filters; + +/** + * Single pass low pass filter. 
+ * @author Joren Six + */ +public class LowPassSP extends IIRFilter { + + public LowPassSP(float freq, float sampleRate) { + super(freq, sampleRate); + } + + @Override + protected void calcCoeff() { + float fracFreq = getFrequency() / getSampleRate(); + float x = (float) Math.exp(-2 * Math.PI * fracFreq); + a = new float[] { 1 - x }; + b = new float[] { x }; + } + +} diff --git a/app/src/main/java/be/tarsos/dsp/filters/package-info.java b/app/src/main/java/be/tarsos/dsp/filters/package-info.java new file mode 100644 index 0000000..8bc73b0 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/filters/package-info.java @@ -0,0 +1,28 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +/** + * This package contains a couple of filters (low and highpass). + */ +package be.tarsos.dsp.filters; diff --git a/app/src/main/java/be/tarsos/dsp/granulator/Grain.java b/app/src/main/java/be/tarsos/dsp/granulator/Grain.java new file mode 100644 index 0000000..2921977 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/granulator/Grain.java @@ -0,0 +1,35 @@ +package be.tarsos.dsp.granulator; + +/** + * The nested class Grain. Stores information about the start time, current position, age, and grain size of the grain. + */ +class Grain{ + + /** The position in millseconds. */ + double position; + + /** The age of the grain in milliseconds. */ + double age; + + /** The grain size of the grain. Fixed at instantiation. */ + double grainSize; + + boolean active; + + /** + * Sets the given Grain to start immediately. + * + * @param g + * the g + * @param time + * the time + */ + void reset(double grainSize,double randomness,double position,double timeStretchFactor,double pitchShiftFactor){ + double randomTimeDiff = (Math.random() > 0.5 ? +1 : -1) * grainSize * randomness; + double actualGrainSize = (grainSize + randomTimeDiff) /timeStretchFactor + 1; + this.position = position - actualGrainSize; + this.age = 0f; + this.grainSize = actualGrainSize; + this.active =true; + } +} \ No newline at end of file diff --git a/app/src/main/java/be/tarsos/dsp/granulator/Granulator.java b/app/src/main/java/be/tarsos/dsp/granulator/Granulator.java new file mode 100644 index 0000000..8ce8688 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/granulator/Granulator.java @@ -0,0 +1,363 @@ +package be.tarsos.dsp.granulator; + + +import java.util.ArrayList; +import java.util.Arrays; + +import be.tarsos.dsp.AudioEvent; +import be.tarsos.dsp.AudioProcessor; + +/** + * Granulator plays back samples using granular synthesis. + * Methods can be used to control playback rate, pitch, grain size, + * grain interval and grain randomness and position (this last case assumes that the playback rate is zero). 
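The setters defined further down in Granulator show how it is meant to be driven. A hypothetical wiring sketch; the parameter values, the dispatcher variable and the addAudioProcessor registration call are assumptions, not part of this patch:

// Hypothetical set-up: 100 ms grains every 40 ms, original speed, a fifth up.
Granulator granulator = new Granulator(44100f, 2048);
granulator.setGrainSize(100);          // grain length in milliseconds
granulator.setGrainInterval(40);       // start a new grain every 40 ms
granulator.setGrainRandomness(0.1f);   // +/- 10% jitter on the grain length
granulator.setTimestretchFactor(1.0f); // playback speed unchanged
granulator.setPitchShiftFactor(1.5f);  // read grains 1.5x faster: up a fifth
granulator.start();
dispatcher.addAudioProcessor(granulator); // 'dispatcher' is an AudioDispatcher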
+ * + * + * + * @author ollie + * @author Joren + */ +public class Granulator implements AudioProcessor { + + public static final float ADAPTIVE_INTERP_LOW_THRESH = 0.5f; + public static final float ADAPTIVE_INTERP_HIGH_THRESH = 2.5f; + + /** The position in milliseconds. */ + protected double position; + + /** + * The millisecond position increment per sample. Calculated from the ratio + * of the {@link AudioContext}'s sample rate and the {@link Sample}'s sample + * rate. + */ + private final double positionIncrement; + + + private float grainInterval; + private float grainSize; + private float grainRandomness; + + /** The time in milliseconds since the last grain was activated. */ + private float timeSinceLastGrain; + + /** The length of one sample in milliseconds. */ + private final double msPerSample; + + /** The pitch, bound to the pitch envelope. */ + private float pitchFactor; + + /** The pitch, bound to the pitch envelope. */ + private float timeStretchFactor; + + /** The list of current grains. */ + private final ArrayList grains; + + /** A list of free grains. */ + private final ArrayList freeGrains; + + /** A list of dead grains. */ + private final ArrayList deadGrains; + + /** The interpolation type. */ + //protected InterpolationType interpolationType; + + /** The window used by grains. */ + private final float[] window; + + + private final float[] audioBuffer; + private int audioBufferWatermark; + + private final float[] outputBuffer; + + + /** + * Instantiates a new GranularSamplePlayer. + * + * @param sampleRate the sample rate. + * @param bufferSize the size of an output buffer. + */ + public Granulator(float sampleRate,int bufferSize) { + grains = new ArrayList(); + freeGrains = new ArrayList(); + deadGrains = new ArrayList(); + + audioBuffer = new float[(int) (12*60*sampleRate)];//max 12 minutes of audio + audioBufferWatermark = 0; + + pitchFactor = 1.0f; + + grainInterval = 40.0f; + grainSize = 100.0f; + grainRandomness = 0.1f; + + window = new be.tarsos.dsp.util.fft.CosineWindow().generateCurve(bufferSize); + outputBuffer = new float[bufferSize]; + + msPerSample = 1000.0f/sampleRate; + + positionIncrement = msPerSample; + } + + + public void start() { + timeSinceLastGrain = 0; + } + + + /** Flag to indicate special case for the first grain. */ + private boolean firstGrain = true; + + /** Special case method for playing first grain. 
*/ + private void firstGrain() { + if(firstGrain) { + Grain g = new Grain(); + g.position = position; + g.age = grainSize / 4f; + g.grainSize = grainSize; + + grains.add(g); + firstGrain = false; + timeSinceLastGrain = grainInterval / 2f; + } + } + + @Override + public boolean process(AudioEvent audioEvent) { + System.arraycopy(audioEvent.getFloatBuffer(), 0, audioBuffer, + audioBufferWatermark, audioEvent.getBufferSize()); + audioBufferWatermark += audioEvent.getBufferSize(); + + // grains.clear(); + // position = audioEvent.getTimeStamp()*1000 - 5000; + + // reset output + Arrays.fill(outputBuffer, 0); + + firstGrain(); + + int bufferSize = audioEvent.getBufferSize(); + + // now loop through the buffer + for (int i = 0; i < bufferSize; i++) { + // determine if we need a new grain + if (timeSinceLastGrain > grainInterval) { + Grain g = null; + if (freeGrains.size() > 0) { + g = freeGrains.get(0); + freeGrains.remove(0); + } else { + g = new Grain(); + } + g.reset(grainSize, grainRandomness, position,timeStretchFactor,pitchFactor); + grains.add(g); + timeSinceLastGrain = 0f; + //System.out.println(grains.size()); + } + + // gather the output from each grain + for (int gi = 0; gi < grains.size(); gi++) { + Grain g = grains.get(gi); + // calculate value of grain window + float windowScale = getValueFraction((float) (g.age / g.grainSize)); + // get position in sample for this grain + // get the frame for this grain + + double sampleValue; + getFrameLinear(g.position); + if (pitchFactor > ADAPTIVE_INTERP_HIGH_THRESH) { + sampleValue = getFrameNoInterp(g.position); + } else if (pitchFactor > ADAPTIVE_INTERP_LOW_THRESH) { + sampleValue = getFrameLinear(g.position); + } else { + sampleValue = getFrameCubic(g.position); + } + sampleValue = sampleValue * windowScale; + outputBuffer[i] += (float) sampleValue; + } + // increment time + position += positionIncrement * timeStretchFactor; + + for (int gi = 0; gi < grains.size(); gi++) { + Grain g = grains.get(gi); + calculateNextGrainPosition(g); + } + // increment timeSinceLastGrain + timeSinceLastGrain += msPerSample; + // finally, see if any grains are dead + for (int gi = 0; gi < grains.size(); gi++) { + Grain g = grains.get(gi); + if (g.age > g.grainSize) { + freeGrains.add(g); + deadGrains.add(g); + } + } + for (int gi = 0; gi < deadGrains.size(); gi++) { + Grain g = deadGrains.get(gi); + grains.remove(g); + } + deadGrains.clear(); + } + audioEvent.setFloatBuffer(outputBuffer); + + return true; + } + + + /** + * Retrieves a frame of audio using linear interpolation. If the frame is + * not in the sample range then zeros are returned. + * + * @param posInMS + * The frame to read -- can be fractional (e.g., 4.4). + * @return + * The framedata to fill. + */ + public double getFrameLinear(double posInMS) { + double result = 0.0; + double sampleNumber = msToSamples(posInMS); + int sampleNumberFloor = (int) Math.floor(sampleNumber); + if (sampleNumberFloor > 0 && sampleNumberFloor < audioBufferWatermark) { + double sampleNumberFraction = sampleNumber - sampleNumberFloor; + if (sampleNumberFloor == audioBufferWatermark - 1) { + result = audioBuffer[sampleNumberFloor]; + } else { + // linear interpolation + double current = audioBuffer[sampleNumberFloor]; + double next = audioBuffer[sampleNumberFloor]; + result = (float) ((1 - sampleNumberFraction) * current + sampleNumberFraction * next); + } + } + return result; + } + + /** + * Retrieves a frame of audio using no interpolation. If the frame is not in + * the sample range then zeros are returned. 
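Linear interpolation normally blends buffer[i] with buffer[i + 1]; as written, getFrameLinear() above assigns the same sample to both current and next, so it effectively returns the sample at the floor position. A standalone sketch of the intended interpolation, as a hypothetical helper:

// value = (1 - frac) * buffer[i] + frac * buffer[i + 1], zero outside the range.
static double frameLinear(float[] buffer, double samplePosition) {
    int i = (int) Math.floor(samplePosition);
    if (i < 0 || i + 1 >= buffer.length) {
        return 0.0;
    }
    double frac = samplePosition - i;
    return (1 - frac) * buffer[i] + frac * buffer[i + 1];
}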
+ * + * @param posInMS + * The frame to read -- will take the last frame before this one. + * + */ + public float getFrameNoInterp(double posInMS) { + double frame = msToSamples(posInMS); + int frame_floor = (int) Math.floor(frame); + return audioBuffer[frame_floor]; + } + + /** + * Retrieves a frame of audio using cubic interpolation. If the frame is not + * in the sample range then zeros are returned. + * + * @param posInMS + * The frame to read -- can be fractional (e.g., 4.4). + */ + public float getFrameCubic(double posInMS) { + float frame = (float) msToSamples(posInMS); + float result = 0.0f; + float a0, a1, a2, a3, mu2; + float ym1, y0, y1, y2; + + int realCurrentSample = (int) Math.floor(frame); + float fractionOffset = (float) (frame - realCurrentSample); + + if (realCurrentSample >= 0 && realCurrentSample < (audioBufferWatermark - 1)) { + realCurrentSample--; + if (realCurrentSample < 0) { + ym1 = audioBuffer[0]; + realCurrentSample = 0; + } else { + ym1 = audioBuffer[realCurrentSample++]; + } + y0 = audioBuffer[realCurrentSample++]; + if (realCurrentSample >= audioBufferWatermark) { + y1 = audioBuffer[audioBufferWatermark-1]; // ?? + } else { + y1 = audioBuffer[realCurrentSample++]; + } + if (realCurrentSample >= audioBufferWatermark) { + y2 = audioBuffer[audioBufferWatermark-1]; + } else { + y2 = audioBuffer[realCurrentSample++]; + } + mu2 = fractionOffset * fractionOffset; + a0 = y2 - y1 - ym1 + y0; + a1 = ym1 - y0 - a0; + a2 = y1 - ym1; + a3 = y0; + result = a0 * fractionOffset * mu2 + a1 * mu2 + a2 * fractionOffset + a3; + } + return result; + } + + + private double msToSamples(double posInMs){ + return (posInMs) / msPerSample; + } + + @Override + public void processingFinished() { + + } + + /** + * Returns the value of the buffer at the given fraction along its length (0 = start, 1 = end). Uses linear interpolation. + * + * @param fraction the point along the buffer to inspect. + * + * @return the value at that point. + */ + public float getValueFraction(float fraction) { + float posInBuf = fraction * window.length; + int lowerIndex = (int)posInBuf; + float offset = posInBuf - lowerIndex; + int upperIndex = (lowerIndex + 1) % window.length; + return (1 - offset) * window[lowerIndex] + offset * window[upperIndex]; + } + + /** + * Calculate next position for the given Grain. + * + * @param g the Grain. + */ + private void calculateNextGrainPosition(Grain g) { + int direction = timeStretchFactor >= 0 ? 
1 : -1; //this is a bit odd in the case when controlling grain from positionEnvelope + g.age += msPerSample; + g.position += direction * positionIncrement * pitchFactor; + } + + public void setTimestretchFactor(float currentFactor) { + timeStretchFactor = currentFactor; + } + + public void setPitchShiftFactor(float currentFactor) { + pitchFactor = currentFactor; + } + + + + public void setGrainInterval(int grainInterval) { + this.grainInterval = grainInterval; + } + + + + public void setGrainSize(int grainSize) { + this.grainSize = grainSize; + + } + + public void setGrainRandomness(float grainRandomness) { + this.grainRandomness = grainRandomness; + } + + + + /** + * @param position in seconds + */ + public void setPosition(float position) { + this.position = position * 1000; + } +} + diff --git a/app/src/main/java/be/tarsos/dsp/granulator/OptimizedGranulator.java b/app/src/main/java/be/tarsos/dsp/granulator/OptimizedGranulator.java new file mode 100644 index 0000000..5606e03 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/granulator/OptimizedGranulator.java @@ -0,0 +1,380 @@ +package be.tarsos.dsp.granulator; + + +import java.util.Arrays; + +import be.tarsos.dsp.AudioEvent; +import be.tarsos.dsp.AudioProcessor; + +/** + * Granulator plays back samples using granular synthesis. + * Methods can be used to control playback rate, pitch, grain size, + * grain interval and grain randomness and position (this last case assumes that the playback rate is zero). + * + * + * + * @author ollie + * @author Joren + */ +public class OptimizedGranulator implements AudioProcessor { + + public static final float ADAPTIVE_INTERP_LOW_THRESH = 0.5f; + public static final float ADAPTIVE_INTERP_HIGH_THRESH = 2.5f; + + /** The position in milliseconds. */ + protected double position; + + /** + * The millisecond position increment per sample. Calculated from the ratio + * of the sample rate + */ + private final double audioSampleLength; + + + private float grainInterval; + private float grainSize; + private float grainRandomness; + + /** The time in milliseconds since the last grain was activated. */ + private float timeSinceLastGrain; + + + /** The pitch, bound to the pitch envelope. */ + private float pitchFactor; + + /** The pitch, bound to the pitch envelope. */ + private float timeStretchFactor; + + /** The list of current grains. */ + private final Grain[] grains; + + /** The interpolation type. */ + //protected InterpolationType interpolationType; + + /** The window used by grains. */ + private final float[] window; + + + private final float[] audioBuffer; + private int audioBufferWatermark; + + private final float[] outputBuffer; + + + /** + * Instantiates a new GranularSamplePlayer. + * + * @param sampleRate the sample rate. + * @param bufferSize the size of an output buffer. + */ + public OptimizedGranulator(float sampleRate,int bufferSize) { + grains = new Grain[50]; + for(int i = 0 ; i < grains.length ; i++){ + grains[i] = new Grain(); + } + + audioBuffer = new float[4800*2];//max 0.2s + audioBufferWatermark = 0; + + pitchFactor = 1.0f; + + grainInterval = 40.0f; + grainSize = 100.0f; + grainRandomness = 0.1f; + + window = new be.tarsos.dsp.util.fft.CosineWindow().generateCurve(512); + outputBuffer = new float[bufferSize]; + + audioSampleLength = 1000.0f/sampleRate; + } + + + public void start() { + timeSinceLastGrain = 0; + } + + + /** Flag to indicate special case for the first grain. */ + private boolean firstGrain = true; + + /** Special case method for playing first grain. 
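The practical difference from Granulator is memory and allocation behaviour. A rough back-of-the-envelope comparison, assuming 44.1 kHz audio and 4-byte floats:

// Granulator keeps up to 12 minutes of history:
//   12 * 60 * 44100 samples = 31,752,000 floats, about 127 MB of heap.
// OptimizedGranulator keeps a fixed circular buffer and a pool of 50 Grain
// objects instead:
//   4800 * 2 samples = 9,600 floats, about 38 KB, with only ~0.2 s of
//   reachable history and no per-grain allocation while processing.
long fullHistoryBytes = 12L * 60L * 44100L * 4L; // 127,008,000 bytes
int circularBufferBytes = 4800 * 2 * 4;          // 38,400 bytes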
*/ + private void firstGrain() { + if(firstGrain) { + Grain g = grains[0]; + g.position = position; + g.age = grainSize / 4f; + g.grainSize = grainSize; + + firstGrain = false; + timeSinceLastGrain = grainInterval / 2f; + } + } + + @Override + public boolean process(AudioEvent audioEvent) { + + int bufferSize = audioEvent.getBufferSize(); + for (int i = 0; i < bufferSize; i++) { + audioBuffer[audioBufferWatermark] = audioEvent.getFloatBuffer()[i]; + audioBufferWatermark++; + if(audioBufferWatermark==audioBuffer.length){ + audioBufferWatermark=0; + + } + } + + System.out.println("Buffer water mark:" + audioBufferWatermark); + + // grains.clear(); + // position = audioEvent.getTimeStamp()*1000 - 5000; + + // reset output + Arrays.fill(outputBuffer, 0); + + firstGrain(); + + int activeGrains = 0; + for(int j = 0 ; j < grains.length ; j++){ + if(grains[j].active){ + activeGrains++; + } + } + System.out.println("Active grains = " + activeGrains); + + // now loop through the buffer + for (int i = 0; i < bufferSize; i++) { + // determine if we need a new grain + if (timeSinceLastGrain > grainInterval) { + Grain firstInactiveGrain = null; + for(int j = 0 ; j < grains.length ; j++){ + if(!grains[j].active){ + firstInactiveGrain = grains[j]; + firstInactiveGrain.reset(grainSize, grainRandomness, position,timeStretchFactor,pitchFactor); + timeSinceLastGrain = 0f; + break; + } + } + //System.out.println(grains.size()); + } + + // gather the output from each grain + for (int gi = 0; gi < grains.length; gi++) { + Grain g = grains[gi]; + if(g.active){ + // calculate value of grain window + float windowScale = getValueFraction((float) (g.age / g.grainSize)); + // get position in sample for this grain + // get the frame for this grain + + double sampleValue; + //if (pitchFactor > ADAPTIVE_INTERP_HIGH_THRESH) { + sampleValue = getFrameNoInterp(g.position); + //} else if (pitchFactor > ADAPTIVE_INTERP_LOW_THRESH) { + // sampleValue = getFrameLinear(g.position); + //} else { + // sampleValue = getFrameCubic(g.position); + //} + sampleValue = sampleValue * windowScale; + outputBuffer[i] += (float) sampleValue; + } + } + // increment time + position += audioSampleLength * timeStretchFactor; + + for (int gi = 0; gi < grains.length; gi++) { + Grain g = grains[gi]; + if(g.active){ + calculateNextGrainPosition(g); + + if (g.age > g.grainSize) { + g.active = false; + } + } + } + + timeSinceLastGrain += audioSampleLength; + } + + for (int i = 0; i < bufferSize; i++) { + outputBuffer[i] = outputBuffer[i]/(float) 5.0f; + } + + audioEvent.setFloatBuffer(outputBuffer); + + return true; + } + + + /** + * Retrieves a frame of audio using linear interpolation. If the frame is + * not in the sample range then zeros are returned. + * + * @param posInMS + * The frame to read -- can be fractional (e.g., 4.4). + * @return + * The framedata to fill. 
+ */ + public double getFrameLinear(double posInMS) { + double result = 0.0; + double sampleNumber = msToSamples(posInMS); + int sampleNumberFloor = (int) Math.floor(sampleNumber); + if (sampleNumberFloor > 0 && sampleNumberFloor < audioBufferWatermark) { + double sampleNumberFraction = sampleNumber - sampleNumberFloor; + if (sampleNumberFloor == audioBufferWatermark - 1) { + result = audioBuffer[sampleNumberFloor]; + } else { + // linear interpolation + double current = audioBuffer[sampleNumberFloor]; + double next = audioBuffer[sampleNumberFloor]; + result = (float) ((1 - sampleNumberFraction) * current + sampleNumberFraction * next); + } + } + return result; + } + + /** + * Retrieves a frame of audio using no interpolation. If the frame is not in + * the sample range then zeros are returned. + * + * @param posInMS + * The frame to read -- will take the last frame before this one. + * + */ + public float getFrameNoInterp(double posInMS) { + double frame = msToSamples(posInMS); + + int frame_floor = (int) Math.floor(frame); + + //int diff = audioBufferWatermark - frame_floor; + //if( diff < 4800 || diff > ) + + + return audioBuffer[frame_floor]; + } + + /** + * Retrieves a frame of audio using cubic interpolation. If the frame is not + * in the sample range then zeros are returned. + * + * @param posInMS + * The frame to read -- can be fractional (e.g., 4.4). + */ + public float getFrameCubic(double posInMS) { + float frame = (float) msToSamples(posInMS); + float result = 0.0f; + float a0, a1, a2, a3, mu2; + float ym1, y0, y1, y2; + + int realCurrentSample = (int) Math.floor(frame); + float fractionOffset = (float) (frame - realCurrentSample); + + if (realCurrentSample >= 0 && realCurrentSample < (audioBufferWatermark - 1)) { + realCurrentSample--; + if (realCurrentSample < 0) { + ym1 = audioBuffer[0]; + realCurrentSample = 0; + } else { + ym1 = audioBuffer[realCurrentSample++]; + } + y0 = audioBuffer[realCurrentSample++]; + if (realCurrentSample >= audioBufferWatermark) { + y1 = audioBuffer[audioBufferWatermark-1]; // ?? + } else { + y1 = audioBuffer[realCurrentSample++]; + } + if (realCurrentSample >= audioBufferWatermark) { + y2 = audioBuffer[audioBufferWatermark-1]; + } else { + y2 = audioBuffer[realCurrentSample++]; + } + mu2 = fractionOffset * fractionOffset; + a0 = y2 - y1 - ym1 + y0; + a1 = ym1 - y0 - a0; + a2 = y1 - ym1; + a3 = y0; + result = a0 * fractionOffset * mu2 + a1 * mu2 + a2 * fractionOffset + a3; + } + return result; + } + + + private double msToSamples(double posInMs){ + double positionInSamples = posInMs / audioSampleLength; + if(positionInSamples < 0){ + positionInSamples = 0; + }else{ + int bufferNumber = (int) (positionInSamples/audioBuffer.length); + positionInSamples = positionInSamples - bufferNumber * audioBuffer.length; + } + return positionInSamples; + } + + @Override + public void processingFinished() { + + } + + /** + * Returns the value of the buffer at the given fraction along its length (0 = start, 1 = end). Uses linear interpolation. + * + * @param fraction the point along the buffer to inspect. + * + * @return the value at that point. + */ + public float getValueFraction(float fraction) { + float posInBuf = fraction * window.length; + if(fraction >= 1.0f){ + posInBuf -= 1.0f; + } + int lowerIndex = (int) posInBuf; + float offset = posInBuf - lowerIndex; + int upperIndex = (lowerIndex + 1) % window.length; + return (1 - offset) * window[lowerIndex] + offset * window[upperIndex]; + } + + /** + * Calculate next position for the given Grain. 
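The pitch shift in calculateNextGrainPosition() comes purely from how fast a grain's read position advances relative to real time. A small numeric sketch; the values are illustrative:

// With pitchFactor = 2.0 a grain's read position advances two sample-periods
// per output sample: one second of output sweeps two seconds of source audio
// (one octave up), while 'age' still advances in real time and drives the
// window envelope.
double msPerSample = 1000.0 / 44100.0;   // the per-sample step in milliseconds
double pitchFactor = 2.0;
double readPositionMs = 0.0;
for (int n = 0; n < 44100; n++) {        // one second of output at 44.1 kHz
    readPositionMs += msPerSample * pitchFactor;
}
// readPositionMs is now about 2000 ms.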
+ * + * @param g the Grain. + */ + private void calculateNextGrainPosition(Grain g) { + int direction = timeStretchFactor >= 0 ? 1 : -1; //this is a bit odd in the case when controlling grain from positionEnvelope + g.age += audioSampleLength; + g.position += direction * audioSampleLength * pitchFactor; + } + + public void setTimestretchFactor(float currentFactor) { + timeStretchFactor = currentFactor; + } + + public void setPitchShiftFactor(float currentFactor) { + pitchFactor = currentFactor; + } + + + + public void setGrainInterval(int grainInterval) { + this.grainInterval = grainInterval; + } + + + + public void setGrainSize(int grainSize) { + this.grainSize = grainSize; + + } + + public void setGrainRandomness(float grainRandomness) { + this.grainRandomness = grainRandomness; + } + + + + /** + * @param position in seconds + */ + public void setPosition(float position) { + this.position = position * 1000; + } +} + diff --git a/app/src/main/java/be/tarsos/dsp/granulator/package-info.java b/app/src/main/java/be/tarsos/dsp/granulator/package-info.java new file mode 100644 index 0000000..084ab55 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/granulator/package-info.java @@ -0,0 +1,28 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +/** + * This package contains a couple of classes for audio granulation. + */ +package be.tarsos.dsp.granulator; diff --git a/app/src/main/java/be/tarsos/dsp/io/PipeDecoder.java b/app/src/main/java/be/tarsos/dsp/io/PipeDecoder.java new file mode 100644 index 0000000..8d58b50 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/io/PipeDecoder.java @@ -0,0 +1,391 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +package be.tarsos.dsp.io; + +import java.io.BufferedInputStream; +import java.io.BufferedReader; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.nio.ByteOrder; +import java.util.logging.Logger; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import be.tarsos.dsp.util.FFMPEGDownloader; + +/** + *
+ * Decode audio files to PCM, mono, 16 bits per sample, at any sample rate, using
+ * an external program. By default ffmpeg is used. Other
+ * command-line programs that are able to decode audio and pipe binary PCM
+ * samples to STDOUT are possible as well (avconv, mplayer).
+ * To install ffmpeg on Debian: apt-get install ffmpeg.
+ *
+ * This adds support for a lot of audio formats and video container formats with
+ * relatively little effort. Depending on the program used, http streams,
+ * rtmp streams, ... are supported as well.
+ *
+ * To see which audio decoders are supported, check:
+ *
+ * ffmpeg -decoders | grep -E "^A" | sort +avconv version 9.8, Copyright (c) 2000-2013 the Libav developers + built on Aug 26 2013 09:52:20 with gcc 4.4.3 (Ubuntu 4.4.3-4ubuntu5.1) +A... 8svx_exp 8SVX exponential +A... 8svx_fib 8SVX fibonacci +A... aac AAC (Advanced Audio Coding) +A... aac_latm AAC LATM (Advanced Audio Coding LATM syntax) +A... ac3 ATSC A/52A (AC-3) +A... adpcm_4xm ADPCM 4X Movie +... + + * + * @author Joren Six + */ +public class PipeDecoder { + + private final static Logger LOG = Logger.getLogger(PipeDecoder.class.getName()); + private final String pipeEnvironment; + private final String pipeArgument; + private final String pipeCommand; + private final int pipeBuffer; + + private boolean printErrorstream = false; + + private String decoderBinaryAbsolutePath; + + public PipeDecoder(){ + pipeBuffer = 10000; + + //Use sensible defaults depending on the platform + if(System.getProperty("os.name").indexOf("indows") > 0 ){ + pipeEnvironment = "cmd.exe"; + pipeArgument = "/C"; + }else if(new File("/bin/bash").exists()){ + pipeEnvironment = "/bin/bash"; + pipeArgument = "-c"; + }else if (new File("/system/bin/sh").exists()){ + //probably we are on android here + pipeEnvironment = "/system/bin/sh"; + pipeArgument = "-c"; + }else{ + LOG.severe("Coud not find a command line environment (cmd.exe or /bin/bash)"); + throw new Error("Decoding via a pipe will not work: Coud not find a command line environment (cmd.exe or /bin/bash)"); + } + + String path = System.getenv("PATH"); + String arguments = " -ss %input_seeking% %number_of_seconds% -i \"%resource%\" -vn -ar %sample_rate% -ac %channels% -sample_fmt s16 -f s16le pipe:1"; + if(isAvailable("ffmpeg")){ + LOG.info("found ffmpeg on the path (" + path + "). Will use ffmpeg for decoding media files."); + pipeCommand = "ffmpeg" + arguments; + } else { + if(isAndroid()) { + String tempDirectory = System.getProperty("java.io.tmpdir"); + printErrorstream=true; + File f = new File(tempDirectory, "ffmpeg"); + if (f.exists() && f.length() > 1000000 && f.canExecute()) { + decoderBinaryAbsolutePath = f.getAbsolutePath(); + } else { + LOG.severe("Could not find an ffmpeg binary for your Android system. 
Did you forget calling: 'new AndroidFFMPEGLocator(this);' ?"); + LOG.severe("Tried to unpack a statically compiled ffmpeg binary for your architecture to: " + f.getAbsolutePath()); + } + }else{ + LOG.warning("Dit not find ffmpeg or avconv on your path(" + path + "), will try to download it automatically."); + FFMPEGDownloader downloader = new FFMPEGDownloader(); + decoderBinaryAbsolutePath = downloader.ffmpegBinary(); + if(decoderBinaryAbsolutePath==null){ + LOG.severe("Could not download an ffmpeg binary automatically for your system."); + } + } + if(decoderBinaryAbsolutePath == null){ + pipeCommand = "false"; + throw new Error("Decoding via a pipe will not work: Could not find an ffmpeg binary for your system"); + }else{ + pipeCommand = '"' + decoderBinaryAbsolutePath + '"' + arguments; + } + } + } + + private boolean isAvailable(String command){ + try{ + Runtime.getRuntime().exec(command + " -version"); + return true; + }catch (Exception e){ + return false; + } + } + + public PipeDecoder(String pipeEnvironment,String pipeArgument,String pipeCommand,String pipeLogFile,int pipeBuffer){ + this.pipeEnvironment = pipeEnvironment; + this.pipeArgument = pipeArgument; + this.pipeCommand = pipeCommand; + this.pipeBuffer = pipeBuffer; + } + + + public InputStream getDecodedStream(final String resource,final int targetSampleRate,final double timeOffset, double numberOfSeconds) { + + try { + String command = pipeCommand; + command = command.replace("%input_seeking%",String.valueOf(timeOffset)); + //defines the number of seconds to process + // -t 10.000 e.g. specifies to process ten seconds + // from the specified time offset (which is often zero). + if(numberOfSeconds>0){ + command = command.replace("%number_of_seconds%","-t " + numberOfSeconds); + } else { + command = command.replace("%number_of_seconds%",""); + } + command = command.replace("%resource%", resource); + command = command.replace("%sample_rate%", String.valueOf(targetSampleRate)); + command = command.replace("%channels%","1"); + + ProcessBuilder pb; + pb= new ProcessBuilder(pipeEnvironment, pipeArgument , command); + + LOG.info("Starting piped decoding process for " + resource); + LOG.info(" with command: " + command); + final Process process = pb.start(); + + final InputStream stdOut = new BufferedInputStream(process.getInputStream(), pipeBuffer){ + @Override + public void close() throws IOException{ + super.close(); + // try to destroy the ffmpeg command after close + process.destroy(); + } + }; + + if(printErrorstream) { + //print to log if requested + new ErrorStreamGobbler(process.getErrorStream(),LOG).start(); + }else{ + //makes sure the error stream is handled + //fix by SalomonBrys + //see https://github.com/JorenSix/TarsosDSP/pull/212 + new ErrorStreamIgnorer(process.getErrorStream()).start(); + } + + new Thread(new Runnable(){ + @Override + public void run() { + try { + process.waitFor(); + LOG.info("Finished piped decoding process"); + } catch (InterruptedException e) { + LOG.severe("Interrupted while waiting for decoding sub process exit."); + e.printStackTrace(); + } + }},"Decoding Pipe").start(); + return stdOut; + } catch (IOException e) { + LOG.warning("IO exception while decoding audio via sub process." + e.getMessage() ); + e.printStackTrace(); + } + return null; + } + + public double getDuration(final String resource) { + double duration = -1; + try { + //use " for windows compatibility! 
+ String command = "ffmpeg -i \"%resource%\""; + + command = command.replace("%resource%", resource); + + ProcessBuilder pb; + pb = new ProcessBuilder(pipeEnvironment, pipeArgument , command); + + LOG.info("Starting duration command for " + resource); + LOG.fine(" with command: " + command); + final Process process = pb.start(); + + final InputStream stdOut = new BufferedInputStream(process.getInputStream(), pipeBuffer){ + @Override + public void close() throws IOException{ + super.close(); + // try to destroy the ffmpeg command after close + process.destroy(); + } + }; + + ErrorStreamStringGlobber essg = new ErrorStreamStringGlobber(process.getErrorStream()); + essg.start(); + + new Thread(new Runnable(){ + @Override + public void run() { + try { + process.waitFor(); + LOG.info("Finished piped decoding process"); + } catch (InterruptedException e) { + LOG.severe("Interrupted while waiting for decoding sub process exit."); + e.printStackTrace(); + } + }},"Decoding Pipe").run(); + + String stdError = essg.getErrorStreamAsString(); + Pattern regex = Pattern.compile(".*\\s.*Duration:\\s+(\\d\\d):(\\d\\d):(\\d\\d)\\.(\\d\\d), .*", Pattern.DOTALL | Pattern.MULTILINE); + Matcher regexMatcher = regex.matcher(stdError); + if (regexMatcher.find()) { + duration = Integer.valueOf(regexMatcher.group(1)) * 3600+ + Integer.valueOf(regexMatcher.group(2)) * 60+ + Integer.valueOf(regexMatcher.group(3)) + + Double.valueOf("." + regexMatcher.group(4) ); + } + } catch (IOException e) { + LOG.warning("IO exception while decoding audio via sub process." + e.getMessage() ); + e.printStackTrace(); + } + return duration; + } + + public void printBinaryInfo(){ + try { + Process p = Runtime.getRuntime().exec(decoderBinaryAbsolutePath); + BufferedReader input = new BufferedReader(new InputStreamReader(p.getErrorStream())); + String line = null; + while ((line = input.readLine()) != null) { + System.out.println(line); + } + input.close(); + //int exitVal = + p.waitFor(); + } catch (InterruptedException e) { + e.printStackTrace(); + } catch (IOException e) { + e.printStackTrace(); + } + } + + /** + * Constructs the target audio format. The audio format is one channel + * signed PCM of a given sample rate. + * + * @param targetSampleRate + * The sample rate to convert to. + * @return The audio format after conversion. 
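getDuration() above shells out to ffmpeg and scrapes the Duration field from stderr. A standalone check of that regex against a typical ffmpeg banner line; the sample stderr text is an assumption, the pattern is copied from the method:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class DurationRegexSketch {
    public static void main(String[] args) {
        String stderr = "  Duration: 00:03:25.46, start: 0.000000, bitrate: 320 kb/s";
        Pattern regex = Pattern.compile(
                ".*\\s.*Duration:\\s+(\\d\\d):(\\d\\d):(\\d\\d)\\.(\\d\\d), .*",
                Pattern.DOTALL | Pattern.MULTILINE);
        Matcher matcher = regex.matcher(stderr);
        if (matcher.find()) {
            double seconds = Integer.parseInt(matcher.group(1)) * 3600
                    + Integer.parseInt(matcher.group(2)) * 60
                    + Integer.parseInt(matcher.group(3))
                    + Double.parseDouble("." + matcher.group(4));
            System.out.println(seconds); // prints 205.46
        }
    }
}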
+ */ + public static TarsosDSPAudioFormat getTargetAudioFormat(int targetSampleRate) { + TarsosDSPAudioFormat audioFormat = new TarsosDSPAudioFormat(TarsosDSPAudioFormat.Encoding.PCM_SIGNED, + targetSampleRate, + 2 * 8, + 1, + 2, + targetSampleRate, + ByteOrder.BIG_ENDIAN.equals(ByteOrder.nativeOrder())); + return audioFormat; + } + + + private boolean isAndroid(){ + try { + // This class is only available on android + Class.forName("android.app.Activity"); + System.out.println("Running on Android!"); + return true; + } catch(ClassNotFoundException e) { + //the class is not found when running JVM + return false; + } + } + + private static class ErrorStreamIgnorer extends Thread { + private final InputStream is; + + private ErrorStreamIgnorer(InputStream is) { + this.is = is; + } + + @Override + public void run() { + try { + InputStreamReader isr = new InputStreamReader(is); + BufferedReader br = new BufferedReader(isr); + String line = null; + while ((line = br.readLine()) != null) {} + } + catch (IOException ioe) { + ioe.printStackTrace(); + } + } + } + + private class ErrorStreamGobbler extends Thread { + private final InputStream is; + private final Logger logger; + + private ErrorStreamGobbler(InputStream is, Logger logger) { + this.is = is; + this.logger = logger; + } + + @Override + public void run() { + try { + InputStreamReader isr = new InputStreamReader(is); + BufferedReader br = new BufferedReader(isr); + String line = null; + while ((line = br.readLine()) != null) { + logger.info(line); + } + } + catch (IOException ioe) { + ioe.printStackTrace(); + } + } + } + + private class ErrorStreamStringGlobber extends Thread { + private final InputStream is; + private final StringBuilder sb; + + private ErrorStreamStringGlobber(InputStream is) { + this.is = is; + this.sb = new StringBuilder(); + } + + @Override + public void run() { + try { + InputStreamReader isr = new InputStreamReader(is); + BufferedReader br = new BufferedReader(isr); + String line = null; + while ((line = br.readLine()) != null) { + sb.append(line); + } + } + catch (IOException ioe) { + ioe.printStackTrace(); + } + } + + public String getErrorStreamAsString(){ + return sb.toString(); + } + } +} diff --git a/app/src/main/java/be/tarsos/dsp/io/PipedAudioStream.java b/app/src/main/java/be/tarsos/dsp/io/PipedAudioStream.java new file mode 100644 index 0000000..fae6012 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/io/PipedAudioStream.java @@ -0,0 +1,94 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +package be.tarsos.dsp.io; + +import java.io.InputStream; + +import be.tarsos.dsp.util.AudioResourceUtils; + + +/** + * An audio file can be used to convert and read from. It uses libAV to convert + * about any audio format to a one channel PCM stream of a chosen sample rate. 
There is
+ * support for movie files as well; the first audio channel is then used as input.
+ * The resource is either a local file or a type of stream supported by libAV (e.g. HTTP streams).
+ *
+ * For a list of audio decoders the following command is practical:
+ *
+avconv -decoders | grep -E "^A" | sort
+ 
+
+A... 8svx_exp             8SVX exponential
+A... 8svx_fib             8SVX fibonacci
+A... aac                  AAC (Advanced Audio Coding)
+A... aac_latm             AAC LATM (Advanced Audio Coding LATM syntax)
+...
+ * 
+ */ +public class PipedAudioStream { + + //private final static Logger LOG = Logger.getLogger(PipedAudioStream.class.getName()); + + private final String resource; + private static PipeDecoder pipeDecoder = new PipeDecoder(); + + public static void setDecoder(PipeDecoder decoder){ + pipeDecoder = decoder; + } + + private final PipeDecoder decoder; + public PipedAudioStream(String resource){ + this.resource = AudioResourceUtils.sanitizeResource(resource); + decoder = pipeDecoder; + } + + /** + * Return a one channel, signed PCM stream of audio of a defined sample rate. + * @param targetSampleRate The target sample stream. + * @param startTimeOffset The start time offset. + * @return An audio stream which can be used to read samples from. + */ + public TarsosDSPAudioInputStream getMonoStream(int targetSampleRate,double startTimeOffset){ + return getMonoStream(targetSampleRate, startTimeOffset,-1); + } + + private TarsosDSPAudioFormat getTargetFormat(int targetSampleRate){ + return new TarsosDSPAudioFormat(targetSampleRate, 16, 1, true, false); + } + + + /** + * Return a one channel, signed PCM stream of audio of a defined sample rate. + * @param targetSampleRate The target sample stream. + * @param startTimeOffset The start time offset. + * @param numberOfSeconds the number of seconds to pipe. If negative the stream is processed until end of stream. + * @return An audio stream which can be used to read samples from. + */ + public TarsosDSPAudioInputStream getMonoStream(int targetSampleRate, double startTimeOffset, + double numberOfSeconds) { + InputStream stream = null; + stream = decoder.getDecodedStream(resource, targetSampleRate,startTimeOffset,numberOfSeconds); + return new UniversalAudioInputStream(stream, getTargetFormat(targetSampleRate)); + } +} diff --git a/app/src/main/java/be/tarsos/dsp/io/TarsosDSPAudioFloatConverter.java b/app/src/main/java/be/tarsos/dsp/io/TarsosDSPAudioFloatConverter.java new file mode 100644 index 0000000..b39f53d --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/io/TarsosDSPAudioFloatConverter.java @@ -0,0 +1,1082 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +/* + * Copyright 2007 Sun Microsystems, Inc. All Rights Reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Sun designates this + * particular file as subject to the "Classpath" exception as provided + * by Sun in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
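PipedAudioStream is the glue between the ffmpeg pipe and an AudioDispatcher. A hypothetical end-to-end sketch; the file path, target sample rate and filter choice are assumptions, and addAudioProcessor is the usual TarsosDSP registration call:

// Decode any ffmpeg-readable resource to 16 kHz mono and low-pass it.
PipedAudioStream pipedStream = new PipedAudioStream("/path/to/input.mp3");
TarsosDSPAudioInputStream audioStream = pipedStream.getMonoStream(16000, 0.0);
AudioDispatcher dispatcher = new AudioDispatcher(audioStream, 2048, 1024);
dispatcher.addAudioProcessor(new LowPassSP(4000, 16000));
dispatcher.run(); // blocks until the end of the stream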
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + */ +package be.tarsos.dsp.io; + +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.DoubleBuffer; +import java.nio.FloatBuffer; + +import be.tarsos.dsp.io.TarsosDSPAudioFormat.Encoding; + + +/** + * This class is used to convert between 8,16,24,32,32+ bit signed/unsigned + * big/litle endian fixed/floating point byte buffers and float buffers. + * + * @author Karl Helgason + */ +public abstract class TarsosDSPAudioFloatConverter { + + public static final Encoding PCM_FLOAT = new Encoding("PCM_FLOAT"); + + /*************************************************************************** + * + * LSB Filter, used filter least significant byte in samples arrays. + * + * Is used filter out data in lsb byte when SampleSizeInBits is not + * dividable by 8. + * + **************************************************************************/ + + private static class AudioFloatLSBFilter extends TarsosDSPAudioFloatConverter { + + private final TarsosDSPAudioFloatConverter converter; + + final private int offset; + + final private int stepsize; + + final private byte mask; + + private byte[] mask_buffer; + + public AudioFloatLSBFilter(TarsosDSPAudioFloatConverter converter, + TarsosDSPAudioFormat format) { + int bits = format.getSampleSizeInBits(); + boolean bigEndian = format.isBigEndian(); + this.converter = converter; + stepsize = (bits + 7) / 8; + offset = bigEndian ? 
(stepsize - 1) : 0; + int lsb_bits = bits % 8; + if (lsb_bits == 0) + mask = (byte) 0x00; + else if (lsb_bits == 1) + mask = (byte) 0x80; + else if (lsb_bits == 2) + mask = (byte) 0xC0; + else if (lsb_bits == 3) + mask = (byte) 0xE0; + else if (lsb_bits == 4) + mask = (byte) 0xF0; + else if (lsb_bits == 5) + mask = (byte) 0xF8; + else if (lsb_bits == 6) + mask = (byte) 0xFC; + else if (lsb_bits == 7) + mask = (byte) 0xFE; + else + mask = (byte) 0xFF; + } + + public byte[] toByteArray(float[] in_buff, int in_offset, int in_len, + byte[] out_buff, int out_offset) { + byte[] ret = converter.toByteArray(in_buff, in_offset, in_len, + out_buff, out_offset); + + int out_offset_end = in_len * stepsize; + for (int i = out_offset + offset; i < out_offset_end; i += stepsize) { + out_buff[i] = (byte) (out_buff[i] & mask); + } + + return ret; + } + + public float[] toFloatArray(byte[] in_buff, int in_offset, + float[] out_buff, int out_offset, int out_len) { + if (mask_buffer == null || mask_buffer.length < in_buff.length) + mask_buffer = new byte[in_buff.length]; + System.arraycopy(in_buff, 0, mask_buffer, 0, in_buff.length); + int in_offset_end = out_len * stepsize; + for (int i = in_offset + offset; i < in_offset_end; i += stepsize) { + mask_buffer[i] = (byte) (mask_buffer[i] & mask); + } + float[] ret = converter.toFloatArray(mask_buffer, in_offset, + out_buff, out_offset, out_len); + return ret; + } + + } + + /*************************************************************************** + * + * 64 bit float, little/big-endian + * + **************************************************************************/ + + // PCM 64 bit float, little-endian + private static class AudioFloatConversion64L extends TarsosDSPAudioFloatConverter { + ByteBuffer bytebuffer = null; + + DoubleBuffer floatbuffer = null; + + double[] double_buff = null; + + public float[] toFloatArray(byte[] in_buff, int in_offset, + float[] out_buff, int out_offset, int out_len) { + int in_len = out_len * 8; + if (bytebuffer == null || bytebuffer.capacity() < in_len) { + bytebuffer = ByteBuffer.allocate(in_len).order( + ByteOrder.LITTLE_ENDIAN); + floatbuffer = bytebuffer.asDoubleBuffer(); + } + bytebuffer.position(0); + floatbuffer.position(0); + bytebuffer.put(in_buff, in_offset, in_len); + if (double_buff == null + || double_buff.length < out_len + out_offset) + double_buff = new double[out_len + out_offset]; + floatbuffer.get(double_buff, out_offset, out_len); + int out_offset_end = out_offset + out_len; + for (int i = out_offset; i < out_offset_end; i++) { + out_buff[i] = (float) double_buff[i]; + } + return out_buff; + } + + public byte[] toByteArray(float[] in_buff, int in_offset, int in_len, + byte[] out_buff, int out_offset) { + int out_len = in_len * 8; + if (bytebuffer == null || bytebuffer.capacity() < out_len) { + bytebuffer = ByteBuffer.allocate(out_len).order( + ByteOrder.LITTLE_ENDIAN); + floatbuffer = bytebuffer.asDoubleBuffer(); + } + floatbuffer.position(0); + bytebuffer.position(0); + if (double_buff == null || double_buff.length < in_offset + in_len) + double_buff = new double[in_offset + in_len]; + int in_offset_end = in_offset + in_len; + for (int i = in_offset; i < in_offset_end; i++) { + double_buff[i] = in_buff[i]; + } + floatbuffer.put(double_buff, in_offset, in_len); + bytebuffer.get(out_buff, out_offset, out_len); + return out_buff; + } + } + + // PCM 64 bit float, big-endian + private static class AudioFloatConversion64B extends TarsosDSPAudioFloatConverter { + ByteBuffer bytebuffer = null; + + DoubleBuffer 
floatbuffer = null; + + double[] double_buff = null; + + public float[] toFloatArray(byte[] in_buff, int in_offset, + float[] out_buff, int out_offset, int out_len) { + int in_len = out_len * 8; + if (bytebuffer == null || bytebuffer.capacity() < in_len) { + bytebuffer = ByteBuffer.allocate(in_len).order( + ByteOrder.BIG_ENDIAN); + floatbuffer = bytebuffer.asDoubleBuffer(); + } + bytebuffer.position(0); + floatbuffer.position(0); + bytebuffer.put(in_buff, in_offset, in_len); + if (double_buff == null + || double_buff.length < out_len + out_offset) + double_buff = new double[out_len + out_offset]; + floatbuffer.get(double_buff, out_offset, out_len); + int out_offset_end = out_offset + out_len; + for (int i = out_offset; i < out_offset_end; i++) { + out_buff[i] = (float) double_buff[i]; + } + return out_buff; + } + + public byte[] toByteArray(float[] in_buff, int in_offset, int in_len, + byte[] out_buff, int out_offset) { + int out_len = in_len * 8; + if (bytebuffer == null || bytebuffer.capacity() < out_len) { + bytebuffer = ByteBuffer.allocate(out_len).order( + ByteOrder.BIG_ENDIAN); + floatbuffer = bytebuffer.asDoubleBuffer(); + } + floatbuffer.position(0); + bytebuffer.position(0); + if (double_buff == null || double_buff.length < in_offset + in_len) + double_buff = new double[in_offset + in_len]; + int in_offset_end = in_offset + in_len; + for (int i = in_offset; i < in_offset_end; i++) { + double_buff[i] = in_buff[i]; + } + floatbuffer.put(double_buff, in_offset, in_len); + bytebuffer.get(out_buff, out_offset, out_len); + return out_buff; + } + } + + /*************************************************************************** + * + * 32 bit float, little/big-endian + * + **************************************************************************/ + + // PCM 32 bit float, little-endian + private static class AudioFloatConversion32L extends TarsosDSPAudioFloatConverter { + ByteBuffer bytebuffer = null; + + FloatBuffer floatbuffer = null; + + public float[] toFloatArray(byte[] in_buff, int in_offset, + float[] out_buff, int out_offset, int out_len) { + int in_len = out_len * 4; + if (bytebuffer == null || bytebuffer.capacity() < in_len) { + bytebuffer = ByteBuffer.allocate(in_len).order( + ByteOrder.LITTLE_ENDIAN); + floatbuffer = bytebuffer.asFloatBuffer(); + } + bytebuffer.position(0); + floatbuffer.position(0); + bytebuffer.put(in_buff, in_offset, in_len); + floatbuffer.get(out_buff, out_offset, out_len); + return out_buff; + } + + public byte[] toByteArray(float[] in_buff, int in_offset, int in_len, + byte[] out_buff, int out_offset) { + int out_len = in_len * 4; + if (bytebuffer == null || bytebuffer.capacity() < out_len) { + bytebuffer = ByteBuffer.allocate(out_len).order( + ByteOrder.LITTLE_ENDIAN); + floatbuffer = bytebuffer.asFloatBuffer(); + } + floatbuffer.position(0); + bytebuffer.position(0); + floatbuffer.put(in_buff, in_offset, in_len); + bytebuffer.get(out_buff, out_offset, out_len); + return out_buff; + } + } + + // PCM 32 bit float, big-endian + private static class AudioFloatConversion32B extends TarsosDSPAudioFloatConverter { + ByteBuffer bytebuffer = null; + + FloatBuffer floatbuffer = null; + + public float[] toFloatArray(byte[] in_buff, int in_offset, + float[] out_buff, int out_offset, int out_len) { + int in_len = out_len * 4; + if (bytebuffer == null || bytebuffer.capacity() < in_len) { + bytebuffer = ByteBuffer.allocate(in_len).order( + ByteOrder.BIG_ENDIAN); + floatbuffer = bytebuffer.asFloatBuffer(); + } + bytebuffer.position(0); + floatbuffer.position(0); + 
bytebuffer.put(in_buff, in_offset, in_len); + floatbuffer.get(out_buff, out_offset, out_len); + return out_buff; + } + + public byte[] toByteArray(float[] in_buff, int in_offset, int in_len, + byte[] out_buff, int out_offset) { + int out_len = in_len * 4; + if (bytebuffer == null || bytebuffer.capacity() < out_len) { + bytebuffer = ByteBuffer.allocate(out_len).order( + ByteOrder.BIG_ENDIAN); + floatbuffer = bytebuffer.asFloatBuffer(); + } + floatbuffer.position(0); + bytebuffer.position(0); + floatbuffer.put(in_buff, in_offset, in_len); + bytebuffer.get(out_buff, out_offset, out_len); + return out_buff; + } + } + + /*************************************************************************** + * + * 8 bit signed/unsigned + * + **************************************************************************/ + + // PCM 8 bit, signed + private static class AudioFloatConversion8S extends TarsosDSPAudioFloatConverter { + public float[] toFloatArray(byte[] in_buff, int in_offset, + float[] out_buff, int out_offset, int out_len) { + int ix = in_offset; + int ox = out_offset; + for (int i = 0; i < out_len; i++) + out_buff[ox++] = in_buff[ix++] * (1.0f / 127.0f); + return out_buff; + } + + public byte[] toByteArray(float[] in_buff, int in_offset, int in_len, + byte[] out_buff, int out_offset) { + int ix = in_offset; + int ox = out_offset; + for (int i = 0; i < in_len; i++) + out_buff[ox++] = (byte) (in_buff[ix++] * 127.0f); + return out_buff; + } + } + + // PCM 8 bit, unsigned + private static class AudioFloatConversion8U extends TarsosDSPAudioFloatConverter { + public float[] toFloatArray(byte[] in_buff, int in_offset, + float[] out_buff, int out_offset, int out_len) { + int ix = in_offset; + int ox = out_offset; + for (int i = 0; i < out_len; i++) + out_buff[ox++] = ((in_buff[ix++] & 0xFF) - 127) + * (1.0f / 127.0f); + return out_buff; + } + + public byte[] toByteArray(float[] in_buff, int in_offset, int in_len, + byte[] out_buff, int out_offset) { + int ix = in_offset; + int ox = out_offset; + for (int i = 0; i < in_len; i++) + out_buff[ox++] = (byte) (127 + in_buff[ix++] * 127.0f); + return out_buff; + } + } + + /*************************************************************************** + * + * 16 bit signed/unsigned, little/big-endian + * + **************************************************************************/ + + // PCM 16 bit, signed, little-endian + private static class AudioFloatConversion16SL extends TarsosDSPAudioFloatConverter { + public float[] toFloatArray(byte[] in_buff, int in_offset, + float[] out_buff, int out_offset, int out_len) { + int ix = in_offset; + int len = out_offset + out_len; + for (int ox = out_offset; ox < len; ox++) { + out_buff[ox] = ((short) ((in_buff[ix++] & 0xFF) | + (in_buff[ix++] << 8))) * (1.0f / 32767.0f); + } + + return out_buff; + } + + public byte[] toByteArray(float[] in_buff, int in_offset, int in_len, + byte[] out_buff, int out_offset) { + int ox = out_offset; + int len = in_offset + in_len; + for (int ix = in_offset; ix < len; ix++) { + int x = (int) (in_buff[ix] * 32767.0); + out_buff[ox++] = (byte) x; + out_buff[ox++] = (byte) (x >>> 8); + } + return out_buff; + } + } + + // PCM 16 bit, signed, big-endian + private static class AudioFloatConversion16SB extends TarsosDSPAudioFloatConverter { + public float[] toFloatArray(byte[] in_buff, int in_offset, + float[] out_buff, int out_offset, int out_len) { + int ix = in_offset; + int ox = out_offset; + for (int i = 0; i < out_len; i++) { + out_buff[ox++] = ((short) ((in_buff[ix++] << 8) | + (in_buff[ix++] 
& 0xFF))) * (1.0f / 32767.0f); + } + return out_buff; + } + + public byte[] toByteArray(float[] in_buff, int in_offset, int in_len, + byte[] out_buff, int out_offset) { + int ix = in_offset; + int ox = out_offset; + for (int i = 0; i < in_len; i++) { + int x = (int) (in_buff[ix++] * 32767.0); + out_buff[ox++] = (byte) (x >>> 8); + out_buff[ox++] = (byte) x; + } + return out_buff; + } + } + + // PCM 16 bit, unsigned, little-endian + private static class AudioFloatConversion16UL extends TarsosDSPAudioFloatConverter { + public float[] toFloatArray(byte[] in_buff, int in_offset, + float[] out_buff, int out_offset, int out_len) { + int ix = in_offset; + int ox = out_offset; + for (int i = 0; i < out_len; i++) { + int x = (in_buff[ix++] & 0xFF) | ((in_buff[ix++] & 0xFF) << 8); + out_buff[ox++] = (x - 32767) * (1.0f / 32767.0f); + } + return out_buff; + } + + public byte[] toByteArray(float[] in_buff, int in_offset, int in_len, + byte[] out_buff, int out_offset) { + int ix = in_offset; + int ox = out_offset; + for (int i = 0; i < in_len; i++) { + int x = 32767 + (int) (in_buff[ix++] * 32767.0); + out_buff[ox++] = (byte) x; + out_buff[ox++] = (byte) (x >>> 8); + } + return out_buff; + } + } + + // PCM 16 bit, unsigned, big-endian + private static class AudioFloatConversion16UB extends TarsosDSPAudioFloatConverter { + public float[] toFloatArray(byte[] in_buff, int in_offset, + float[] out_buff, int out_offset, int out_len) { + int ix = in_offset; + int ox = out_offset; + for (int i = 0; i < out_len; i++) { + int x = ((in_buff[ix++] & 0xFF) << 8) | (in_buff[ix++] & 0xFF); + out_buff[ox++] = (x - 32767) * (1.0f / 32767.0f); + } + return out_buff; + } + + public byte[] toByteArray(float[] in_buff, int in_offset, int in_len, + byte[] out_buff, int out_offset) { + int ix = in_offset; + int ox = out_offset; + for (int i = 0; i < in_len; i++) { + int x = 32767 + (int) (in_buff[ix++] * 32767.0); + out_buff[ox++] = (byte) (x >>> 8); + out_buff[ox++] = (byte) x; + } + return out_buff; + } + } + + /*************************************************************************** + * + * 24 bit signed/unsigned, little/big-endian + * + **************************************************************************/ + + // PCM 24 bit, signed, little-endian + private static class AudioFloatConversion24SL extends TarsosDSPAudioFloatConverter { + public float[] toFloatArray(byte[] in_buff, int in_offset, + float[] out_buff, int out_offset, int out_len) { + int ix = in_offset; + int ox = out_offset; + for (int i = 0; i < out_len; i++) { + int x = (in_buff[ix++] & 0xFF) | ((in_buff[ix++] & 0xFF) << 8) + | ((in_buff[ix++] & 0xFF) << 16); + if (x > 0x7FFFFF) + x -= 0x1000000; + out_buff[ox++] = x * (1.0f / (float)0x7FFFFF); + } + return out_buff; + } + + public byte[] toByteArray(float[] in_buff, int in_offset, int in_len, + byte[] out_buff, int out_offset) { + int ix = in_offset; + int ox = out_offset; + for (int i = 0; i < in_len; i++) { + int x = (int) (in_buff[ix++] * (float)0x7FFFFF); + if (x < 0) + x += 0x1000000; + out_buff[ox++] = (byte) x; + out_buff[ox++] = (byte) (x >>> 8); + out_buff[ox++] = (byte) (x >>> 16); + } + return out_buff; + } + } + + // PCM 24 bit, signed, big-endian + private static class AudioFloatConversion24SB extends TarsosDSPAudioFloatConverter { + public float[] toFloatArray(byte[] in_buff, int in_offset, + float[] out_buff, int out_offset, int out_len) { + int ix = in_offset; + int ox = out_offset; + for (int i = 0; i < out_len; i++) { + int x = ((in_buff[ix++] & 0xFF) << 16) + | ((in_buff[ix++] & 
0xFF) << 8) | (in_buff[ix++] & 0xFF); + if (x > 0x7FFFFF) + x -= 0x1000000; + out_buff[ox++] = x * (1.0f / (float)0x7FFFFF); + } + return out_buff; + } + + public byte[] toByteArray(float[] in_buff, int in_offset, int in_len, + byte[] out_buff, int out_offset) { + int ix = in_offset; + int ox = out_offset; + for (int i = 0; i < in_len; i++) { + int x = (int) (in_buff[ix++] * (float)0x7FFFFF); + if (x < 0) + x += 0x1000000; + out_buff[ox++] = (byte) (x >>> 16); + out_buff[ox++] = (byte) (x >>> 8); + out_buff[ox++] = (byte) x; + } + return out_buff; + } + } + + // PCM 24 bit, unsigned, little-endian + private static class AudioFloatConversion24UL extends TarsosDSPAudioFloatConverter { + public float[] toFloatArray(byte[] in_buff, int in_offset, + float[] out_buff, int out_offset, int out_len) { + int ix = in_offset; + int ox = out_offset; + for (int i = 0; i < out_len; i++) { + int x = (in_buff[ix++] & 0xFF) | ((in_buff[ix++] & 0xFF) << 8) + | ((in_buff[ix++] & 0xFF) << 16); + x -= 0x7FFFFF; + out_buff[ox++] = x * (1.0f / (float)0x7FFFFF); + } + return out_buff; + } + + public byte[] toByteArray(float[] in_buff, int in_offset, int in_len, + byte[] out_buff, int out_offset) { + int ix = in_offset; + int ox = out_offset; + for (int i = 0; i < in_len; i++) { + int x = (int) (in_buff[ix++] * (float)0x7FFFFF); + x += 0x7FFFFF; + out_buff[ox++] = (byte) x; + out_buff[ox++] = (byte) (x >>> 8); + out_buff[ox++] = (byte) (x >>> 16); + } + return out_buff; + } + } + + // PCM 24 bit, unsigned, big-endian + private static class AudioFloatConversion24UB extends TarsosDSPAudioFloatConverter { + public float[] toFloatArray(byte[] in_buff, int in_offset, + float[] out_buff, int out_offset, int out_len) { + int ix = in_offset; + int ox = out_offset; + for (int i = 0; i < out_len; i++) { + int x = ((in_buff[ix++] & 0xFF) << 16) + | ((in_buff[ix++] & 0xFF) << 8) | (in_buff[ix++] & 0xFF); + x -= 0x7FFFFF; + out_buff[ox++] = x * (1.0f / (float)0x7FFFFF); + } + return out_buff; + } + + public byte[] toByteArray(float[] in_buff, int in_offset, int in_len, + byte[] out_buff, int out_offset) { + int ix = in_offset; + int ox = out_offset; + for (int i = 0; i < in_len; i++) { + int x = (int) (in_buff[ix++] * (float)0x7FFFFF); + x += 0x7FFFFF; + out_buff[ox++] = (byte) (x >>> 16); + out_buff[ox++] = (byte) (x >>> 8); + out_buff[ox++] = (byte) x; + } + return out_buff; + } + } + + /*************************************************************************** + * + * 32 bit signed/unsigned, little/big-endian + * + **************************************************************************/ + + // PCM 32 bit, signed, little-endian + private static class AudioFloatConversion32SL extends TarsosDSPAudioFloatConverter { + public float[] toFloatArray(byte[] in_buff, int in_offset, + float[] out_buff, int out_offset, int out_len) { + int ix = in_offset; + int ox = out_offset; + for (int i = 0; i < out_len; i++) { + int x = (in_buff[ix++] & 0xFF) | ((in_buff[ix++] & 0xFF) << 8) | + ((in_buff[ix++] & 0xFF) << 16) | + ((in_buff[ix++] & 0xFF) << 24); + out_buff[ox++] = x * (1.0f / (float)0x7FFFFFFF); + } + return out_buff; + } + + public byte[] toByteArray(float[] in_buff, int in_offset, int in_len, + byte[] out_buff, int out_offset) { + int ix = in_offset; + int ox = out_offset; + for (int i = 0; i < in_len; i++) { + int x = (int) (in_buff[ix++] * (float)0x7FFFFFFF); + out_buff[ox++] = (byte) x; + out_buff[ox++] = (byte) (x >>> 8); + out_buff[ox++] = (byte) (x >>> 16); + out_buff[ox++] = (byte) (x >>> 24); + } + return out_buff; + } 
+ } + + // PCM 32 bit, signed, big-endian + private static class AudioFloatConversion32SB extends TarsosDSPAudioFloatConverter { + public float[] toFloatArray(byte[] in_buff, int in_offset, + float[] out_buff, int out_offset, int out_len) { + int ix = in_offset; + int ox = out_offset; + for (int i = 0; i < out_len; i++) { + int x = ((in_buff[ix++] & 0xFF) << 24) | + ((in_buff[ix++] & 0xFF) << 16) | + ((in_buff[ix++] & 0xFF) << 8) | (in_buff[ix++] & 0xFF); + out_buff[ox++] = x * (1.0f / (float)0x7FFFFFFF); + } + return out_buff; + } + + public byte[] toByteArray(float[] in_buff, int in_offset, int in_len, + byte[] out_buff, int out_offset) { + int ix = in_offset; + int ox = out_offset; + for (int i = 0; i < in_len; i++) { + int x = (int) (in_buff[ix++] * (float)0x7FFFFFFF); + out_buff[ox++] = (byte) (x >>> 24); + out_buff[ox++] = (byte) (x >>> 16); + out_buff[ox++] = (byte) (x >>> 8); + out_buff[ox++] = (byte) x; + } + return out_buff; + } + } + + // PCM 32 bit, unsigned, little-endian + private static class AudioFloatConversion32UL extends TarsosDSPAudioFloatConverter { + public float[] toFloatArray(byte[] in_buff, int in_offset, + float[] out_buff, int out_offset, int out_len) { + int ix = in_offset; + int ox = out_offset; + for (int i = 0; i < out_len; i++) { + int x = (in_buff[ix++] & 0xFF) | ((in_buff[ix++] & 0xFF) << 8) | + ((in_buff[ix++] & 0xFF) << 16) | + ((in_buff[ix++] & 0xFF) << 24); + x -= 0x7FFFFFFF; + out_buff[ox++] = x * (1.0f / (float)0x7FFFFFFF); + } + return out_buff; + } + + public byte[] toByteArray(float[] in_buff, int in_offset, int in_len, + byte[] out_buff, int out_offset) { + int ix = in_offset; + int ox = out_offset; + for (int i = 0; i < in_len; i++) { + int x = (int) (in_buff[ix++] * (float)0x7FFFFFFF); + x += 0x7FFFFFFF; + out_buff[ox++] = (byte) x; + out_buff[ox++] = (byte) (x >>> 8); + out_buff[ox++] = (byte) (x >>> 16); + out_buff[ox++] = (byte) (x >>> 24); + } + return out_buff; + } + } + + // PCM 32 bit, unsigned, big-endian + private static class AudioFloatConversion32UB extends TarsosDSPAudioFloatConverter { + + public float[] toFloatArray(byte[] in_buff, int in_offset, + float[] out_buff, int out_offset, int out_len) { + int ix = in_offset; + int ox = out_offset; + for (int i = 0; i < out_len; i++) { + int x = ((in_buff[ix++] & 0xFF) << 24) | + ((in_buff[ix++] & 0xFF) << 16) | + ((in_buff[ix++] & 0xFF) << 8) | (in_buff[ix++] & 0xFF); + x -= 0x7FFFFFFF; + out_buff[ox++] = x * (1.0f / (float)0x7FFFFFFF); + } + return out_buff; + } + + public byte[] toByteArray(float[] in_buff, int in_offset, int in_len, + byte[] out_buff, int out_offset) { + int ix = in_offset; + int ox = out_offset; + for (int i = 0; i < in_len; i++) { + int x = (int) (in_buff[ix++] * (float)0x7FFFFFFF); + x += 0x7FFFFFFF; + out_buff[ox++] = (byte) (x >>> 24); + out_buff[ox++] = (byte) (x >>> 16); + out_buff[ox++] = (byte) (x >>> 8); + out_buff[ox++] = (byte) x; + } + return out_buff; + } + } + + /*************************************************************************** + * + * 32+ bit signed/unsigned, little/big-endian + * + **************************************************************************/ + + // PCM 32+ bit, signed, little-endian + private static class AudioFloatConversion32xSL extends TarsosDSPAudioFloatConverter { + + final int xbytes; + + public AudioFloatConversion32xSL(int xbytes) { + this.xbytes = xbytes; + } + + public float[] toFloatArray(byte[] in_buff, int in_offset, + float[] out_buff, int out_offset, int out_len) { + int ix = in_offset; + int ox = out_offset; + 
for (int i = 0; i < out_len; i++) { + ix += xbytes; + int x = (in_buff[ix++] & 0xFF) | ((in_buff[ix++] & 0xFF) << 8) + | ((in_buff[ix++] & 0xFF) << 16) + | ((in_buff[ix++] & 0xFF) << 24); + out_buff[ox++] = x * (1.0f / (float)0x7FFFFFFF); + } + return out_buff; + } + + public byte[] toByteArray(float[] in_buff, int in_offset, int in_len, + byte[] out_buff, int out_offset) { + int ix = in_offset; + int ox = out_offset; + for (int i = 0; i < in_len; i++) { + int x = (int) (in_buff[ix++] * (float)0x7FFFFFFF); + for (int j = 0; j < xbytes; j++) { + out_buff[ox++] = 0; + } + out_buff[ox++] = (byte) x; + out_buff[ox++] = (byte) (x >>> 8); + out_buff[ox++] = (byte) (x >>> 16); + out_buff[ox++] = (byte) (x >>> 24); + } + return out_buff; + } + } + + // PCM 32+ bit, signed, big-endian + private static class AudioFloatConversion32xSB extends TarsosDSPAudioFloatConverter { + + final int xbytes; + + public AudioFloatConversion32xSB(int xbytes) { + this.xbytes = xbytes; + } + + public float[] toFloatArray(byte[] in_buff, int in_offset, + float[] out_buff, int out_offset, int out_len) { + int ix = in_offset; + int ox = out_offset; + for (int i = 0; i < out_len; i++) { + int x = ((in_buff[ix++] & 0xFF) << 24) + | ((in_buff[ix++] & 0xFF) << 16) + | ((in_buff[ix++] & 0xFF) << 8) + | (in_buff[ix++] & 0xFF); + ix += xbytes; + out_buff[ox++] = x * (1.0f / (float)0x7FFFFFFF); + } + return out_buff; + } + + public byte[] toByteArray(float[] in_buff, int in_offset, int in_len, + byte[] out_buff, int out_offset) { + int ix = in_offset; + int ox = out_offset; + for (int i = 0; i < in_len; i++) { + int x = (int) (in_buff[ix++] * (float)0x7FFFFFFF); + out_buff[ox++] = (byte) (x >>> 24); + out_buff[ox++] = (byte) (x >>> 16); + out_buff[ox++] = (byte) (x >>> 8); + out_buff[ox++] = (byte) x; + for (int j = 0; j < xbytes; j++) { + out_buff[ox++] = 0; + } + } + return out_buff; + } + } + + // PCM 32+ bit, unsigned, little-endian + private static class AudioFloatConversion32xUL extends TarsosDSPAudioFloatConverter { + + final int xbytes; + + public AudioFloatConversion32xUL(int xbytes) { + this.xbytes = xbytes; + } + + public float[] toFloatArray(byte[] in_buff, int in_offset, + float[] out_buff, int out_offset, int out_len) { + int ix = in_offset; + int ox = out_offset; + for (int i = 0; i < out_len; i++) { + ix += xbytes; + int x = (in_buff[ix++] & 0xFF) | ((in_buff[ix++] & 0xFF) << 8) + | ((in_buff[ix++] & 0xFF) << 16) + | ((in_buff[ix++] & 0xFF) << 24); + x -= 0x7FFFFFFF; + out_buff[ox++] = x * (1.0f / (float)0x7FFFFFFF); + } + return out_buff; + } + + public byte[] toByteArray(float[] in_buff, int in_offset, int in_len, + byte[] out_buff, int out_offset) { + int ix = in_offset; + int ox = out_offset; + for (int i = 0; i < in_len; i++) { + int x = (int) (in_buff[ix++] * (float)0x7FFFFFFF); + x += 0x7FFFFFFF; + for (int j = 0; j < xbytes; j++) { + out_buff[ox++] = 0; + } + out_buff[ox++] = (byte) x; + out_buff[ox++] = (byte) (x >>> 8); + out_buff[ox++] = (byte) (x >>> 16); + out_buff[ox++] = (byte) (x >>> 24); + } + return out_buff; + } + } + + // PCM 32+ bit, unsigned, big-endian + private static class AudioFloatConversion32xUB extends TarsosDSPAudioFloatConverter { + + final int xbytes; + + public AudioFloatConversion32xUB(int xbytes) { + this.xbytes = xbytes; + } + + public float[] toFloatArray(byte[] in_buff, int in_offset, + float[] out_buff, int out_offset, int out_len) { + int ix = in_offset; + int ox = out_offset; + for (int i = 0; i < out_len; i++) { + int x = ((in_buff[ix++] & 0xFF) << 24) | + ((in_buff[ix++] 
& 0xFF) << 16) | + ((in_buff[ix++] & 0xFF) << 8) | (in_buff[ix++] & 0xFF); + ix += xbytes; + x -= 2147483647; + out_buff[ox++] = x * (1.0f / 2147483647.0f); + } + return out_buff; + } + + public byte[] toByteArray(float[] in_buff, int in_offset, int in_len, + byte[] out_buff, int out_offset) { + int ix = in_offset; + int ox = out_offset; + for (int i = 0; i < in_len; i++) { + int x = (int) (in_buff[ix++] * 2147483647.0); + x += 2147483647; + out_buff[ox++] = (byte) (x >>> 24); + out_buff[ox++] = (byte) (x >>> 16); + out_buff[ox++] = (byte) (x >>> 8); + out_buff[ox++] = (byte) x; + for (int j = 0; j < xbytes; j++) { + out_buff[ox++] = 0; + } + } + return out_buff; + } + } + + public static TarsosDSPAudioFloatConverter getConverter(TarsosDSPAudioFormat format) { + TarsosDSPAudioFloatConverter conv = null; + if (format.getFrameSize() == 0) + return null; + if (format.getFrameSize() != + ((format.getSampleSizeInBits() + 7) / 8) * format.getChannels()) { + return null; + } + if (format.getEncoding().equals(Encoding.PCM_SIGNED)) { + if (format.isBigEndian()) { + if (format.getSampleSizeInBits() <= 8) { + conv = new AudioFloatConversion8S(); + } else if (format.getSampleSizeInBits() > 8 && + format.getSampleSizeInBits() <= 16) { + conv = new AudioFloatConversion16SB(); + } else if (format.getSampleSizeInBits() > 16 && + format.getSampleSizeInBits() <= 24) { + conv = new AudioFloatConversion24SB(); + } else if (format.getSampleSizeInBits() > 24 && + format.getSampleSizeInBits() <= 32) { + conv = new AudioFloatConversion32SB(); + } else if (format.getSampleSizeInBits() > 32) { + conv = new AudioFloatConversion32xSB(((format + .getSampleSizeInBits() + 7) / 8) - 4); + } + } else { + if (format.getSampleSizeInBits() <= 8) { + conv = new AudioFloatConversion8S(); + } else if (format.getSampleSizeInBits() > 8 && + format.getSampleSizeInBits() <= 16) { + conv = new AudioFloatConversion16SL(); + } else if (format.getSampleSizeInBits() > 16 && + format.getSampleSizeInBits() <= 24) { + conv = new AudioFloatConversion24SL(); + } else if (format.getSampleSizeInBits() > 24 && + format.getSampleSizeInBits() <= 32) { + conv = new AudioFloatConversion32SL(); + } else if (format.getSampleSizeInBits() > 32) { + conv = new AudioFloatConversion32xSL(((format + .getSampleSizeInBits() + 7) / 8) - 4); + } + } + } else if (format.getEncoding().equals(Encoding.PCM_UNSIGNED)) { + if (format.isBigEndian()) { + if (format.getSampleSizeInBits() <= 8) { + conv = new AudioFloatConversion8U(); + } else if (format.getSampleSizeInBits() > 8 && + format.getSampleSizeInBits() <= 16) { + conv = new AudioFloatConversion16UB(); + } else if (format.getSampleSizeInBits() > 16 && + format.getSampleSizeInBits() <= 24) { + conv = new AudioFloatConversion24UB(); + } else if (format.getSampleSizeInBits() > 24 && + format.getSampleSizeInBits() <= 32) { + conv = new AudioFloatConversion32UB(); + } else if (format.getSampleSizeInBits() > 32) { + conv = new AudioFloatConversion32xUB((( + format.getSampleSizeInBits() + 7) / 8) - 4); + } + } else { + if (format.getSampleSizeInBits() <= 8) { + conv = new AudioFloatConversion8U(); + } else if (format.getSampleSizeInBits() > 8 && + format.getSampleSizeInBits() <= 16) { + conv = new AudioFloatConversion16UL(); + } else if (format.getSampleSizeInBits() > 16 && + format.getSampleSizeInBits() <= 24) { + conv = new AudioFloatConversion24UL(); + } else if (format.getSampleSizeInBits() > 24 && + format.getSampleSizeInBits() <= 32) { + conv = new AudioFloatConversion32UL(); + } else if 
(format.getSampleSizeInBits() > 32) { + conv = new AudioFloatConversion32xUL((( + format.getSampleSizeInBits() + 7) / 8) - 4); + } + } + } else if (format.getEncoding().equals(PCM_FLOAT)) { + if (format.getSampleSizeInBits() == 32) { + if (format.isBigEndian()) + conv = new AudioFloatConversion32B(); + else + conv = new AudioFloatConversion32L(); + } else if (format.getSampleSizeInBits() == 64) { + if (format.isBigEndian()) + conv = new AudioFloatConversion64B(); + else + conv = new AudioFloatConversion64L(); + } + + } + + if ((format.getEncoding().equals(Encoding.PCM_SIGNED) || + format.getEncoding().equals(Encoding.PCM_UNSIGNED)) && + (format.getSampleSizeInBits() % 8 != 0)) { + conv = new AudioFloatLSBFilter(conv, format); + } + + if (conv != null) + conv.format = format; + return conv; + } + + private TarsosDSPAudioFormat format; + + public TarsosDSPAudioFormat getFormat() { + return format; + } + + public abstract float[] toFloatArray(byte[] in_buff, int in_offset, + float[] out_buff, int out_offset, int out_len); + + public float[] toFloatArray(byte[] in_buff, float[] out_buff, + int out_offset, int out_len) { + return toFloatArray(in_buff, 0, out_buff, out_offset, out_len); + } + + public float[] toFloatArray(byte[] in_buff, int in_offset, + float[] out_buff, int out_len) { + return toFloatArray(in_buff, in_offset, out_buff, 0, out_len); + } + + public float[] toFloatArray(byte[] in_buff, float[] out_buff, int out_len) { + return toFloatArray(in_buff, 0, out_buff, 0, out_len); + } + + public float[] toFloatArray(byte[] in_buff, float[] out_buff) { + return toFloatArray(in_buff, 0, out_buff, 0, out_buff.length); + } + + public abstract byte[] toByteArray(float[] in_buff, int in_offset, + int in_len, byte[] out_buff, int out_offset); + + public byte[] toByteArray(float[] in_buff, int in_len, byte[] out_buff, + int out_offset) { + return toByteArray(in_buff, 0, in_len, out_buff, out_offset); + } + + public byte[] toByteArray(float[] in_buff, int in_offset, int in_len, + byte[] out_buff) { + return toByteArray(in_buff, in_offset, in_len, out_buff, 0); + } + + public byte[] toByteArray(float[] in_buff, int in_len, byte[] out_buff) { + return toByteArray(in_buff, 0, in_len, out_buff, 0); + } + + public byte[] toByteArray(float[] in_buff, byte[] out_buff) { + return toByteArray(in_buff, 0, in_buff.length, out_buff, 0); + } +} diff --git a/app/src/main/java/be/tarsos/dsp/io/TarsosDSPAudioFormat.java b/app/src/main/java/be/tarsos/dsp/io/TarsosDSPAudioFormat.java new file mode 100644 index 0000000..a1de2e9 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/io/TarsosDSPAudioFormat.java @@ -0,0 +1,648 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +package be.tarsos.dsp.io; + +/* + * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. Oracle designates this + * particular file as subject to the "Classpath" exception as provided + * by Oracle in the LICENSE file that accompanied this code. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +/** + * AudioFormat is the class that specifies a particular arrangement of data in a sound stream. + * By examing the information stored in the audio format, you can discover how to interpret the bits in the + * binary sound data. + *

+ * Every data LineWavelet has an audio format associated with its data stream. The audio format of a source (playback) data LineWavelet indicates + * what kind of data the data LineWavelet expects to receive for output. For a target (capture) data LineWavelet, the audio format specifies the kind + * of the data that can be read from the LineWavelet. + * Sound files also have audio formats, of course. + *

+ * The AudioFormat class accommodates a number of common sound-file encoding techniques, including + * pulse-code modulation (PCM), mu-law encoding, and a-law encoding. These encoding techniques are predefined, + * but service providers can create new encoding types. + * The encoding that a specific format uses is named by its encoding field. + *

+ * In addition to the encoding, the audio format includes other properties that further specify the exact + * arrangement of the data. + * These include the number of channels, sample rate, sample size, byte order, frame rate, and frame size. + * Sounds may have different numbers of audio channels: one for mono, two for stereo. + * The sample rate measures how many "snapshots" (samples) of the sound pressure are taken per second, per channel. + * (If the sound is stereo rather than mono, two samples are actually measured at each instant of time: one for the left channel, + * and another for the right channel; however, the sample rate still measures the number per channel, so the rate is the same + * regardless of the number of channels. This is the standard use of the term.) + * The sample size indicates how many bits are used to store each snapshot; 8 and 16 are typical values. + * For 16-bit samples (or any other sample size larger than a byte), + * byte order is important; the bytes in each sample are arranged in + * either the "little-endian" or "big-endian" style. + * For encodings like PCM, a frame consists of the set of samples for all channels at a given + * point in time, and so the size of a frame (in bytes) is always equal to the size of a sample (in bytes) times + * the number of channels. However, with some other sorts of encodings a frame can contain + * a bundle of compressed data for a whole series of samples, as well as additional, non-sample + * data. For such encodings, the sample rate and sample size refer to the data after it is decoded into PCM, + * and so they are completely different from the frame rate and frame size. + * + *
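[Editorial note, not part of the patch] To make the sample/frame bookkeeping above concrete: for plain PCM the frame size is simply the per-sample byte count times the channel count, and the frame rate equals the sample rate. A minimal sketch using the convenience constructor added later in this file (the 44.1 kHz stereo figures are illustrative, not taken from the patch):

    // 44.1 kHz, 16-bit, stereo, signed, little-endian PCM.
    TarsosDSPAudioFormat fmt = new TarsosDSPAudioFormat(44100f, 16, 2, true, false);
    int frameSize = fmt.getFrameSize();    // 4 bytes  -> ((16 + 7) / 8) * 2 channels
    float frameRate = fmt.getFrameRate();  // 44100.0  -> equal to the sample rate for PCM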

An AudioFormat object can include a set of + * properties. A property is a pair of key and value: the key + * is of type String, the associated property + * value is an arbitrary object. Properties specify + * additional format specifications, like the bit rate for + * compressed formats. Properties are mainly used as a means + * to transport additional information of the audio format + * to and from the service providers. Therefore, properties + * are ignored in the AudioFormat method. + * + *

The following table lists some common properties which + * service providers should use, if applicable: + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
A table with service providers
Property keyValue typeDescription
"bitrate"{@link java.lang.Integer Integer}average bit rate in bits per second
"vbr"{@link java.lang.Boolean Boolean}true, if the file is encoded in variable bit + * rate (VBR)
"quality"{@link java.lang.Integer Integer}encoding/conversion quality, 1..100
+ * + *

Vendors of service providers (plugins) are encouraged + * to seek information about other already established + * properties in third party plugins, and follow the same + * conventions. + * + * @author Kara Kytle + * @author Florian Bomers + * @since 1.3 + */ +public class TarsosDSPAudioFormat { + + // INSTANCE VARIABLES + + + /** + * The audio encoding technique used by this format. + */ + protected Encoding encoding; + + /** + * The number of samples played or recorded per second, for sounds that have this format. + */ + protected float sampleRate; + + /** + * The number of bits in each sample of a sound that has this format. + */ + protected int sampleSizeInBits; + + /** + * The number of audio channels in this format (1 for mono, 2 for stereo). + */ + protected int channels; + + /** + * The number of bytes in each frame of a sound that has this format. + */ + protected int frameSize; + + /** + * The number of frames played or recorded per second, for sounds that have this format. + */ + protected float frameRate; + + /** + * Indicates whether the audio data is stored in big-endian or little-endian order. + */ + protected boolean bigEndian; + + + /** The set of properties */ + private HashMap properties; + + public static final int NOT_SPECIFIED = -1; + + + /** + * Constructs an AudioFormat with the given parameters. + * The encoding specifies the convention used to represent the data. + * The other parameters are further explained in the + * @param encoding the audio encoding technique + * @param sampleRate the number of samples per second + * @param sampleSizeInBits the number of bits in each sample + * @param channels the number of channels (1 for mono, 2 for stereo, and so on) + * @param frameSize the number of bytes in each frame + * @param frameRate the number of frames per second + * @param bigEndian indicates whether the data for a single sample + * is stored in big-endian byte order (false + * means little-endian) + */ + public TarsosDSPAudioFormat(Encoding encoding, float sampleRate, int sampleSizeInBits, + int channels, int frameSize, float frameRate, boolean bigEndian) { + + this.encoding = encoding; + this.sampleRate = sampleRate; + this.sampleSizeInBits = sampleSizeInBits; + this.channels = channels; + this.frameSize = frameSize; + this.frameRate = frameRate; + this.bigEndian = bigEndian; + this.properties = null; + } + + + /** + * Constructs an AudioFormat with the given parameters. + * The encoding specifies the convention used to represent the data. 
+ * The other parameters are further explained in the + * @param encoding the audio encoding technique + * @param sampleRate the number of samples per second + * @param sampleSizeInBits the number of bits in each sample + * @param channels the number of channels (1 for mono, 2 for + * stereo, and so on) + * @param frameSize the number of bytes in each frame + * @param frameRate the number of frames per second + * @param bigEndian indicates whether the data for a single sample + * is stored in big-endian byte order + * (false means little-endian) + * @param properties a Map<String,Object> object + * containing format properties + * + * @since 1.5 + */ + public TarsosDSPAudioFormat(Encoding encoding, float sampleRate, + int sampleSizeInBits, int channels, + int frameSize, float frameRate, + boolean bigEndian, Map properties) { + this(encoding, sampleRate, sampleSizeInBits, channels, + frameSize, frameRate, bigEndian); + this.properties = new HashMap(properties); + } + + + /** + * Constructs an AudioFormat with a linear PCM encoding and + * the given parameters. The frame size is set to the number of bytes + * required to contain one sample from each channel, and the frame rate + * is set to the sample rate. + * + * @param sampleRate the number of samples per second + * @param sampleSizeInBits the number of bits in each sample + * @param channels the number of channels (1 for mono, 2 for stereo, and so on) + * @param signed indicates whether the data is signed or unsigned + * @param bigEndian indicates whether the data for a single sample + * is stored in big-endian byte order (false + * means little-endian) + */ + public TarsosDSPAudioFormat(float sampleRate, int sampleSizeInBits, + int channels, boolean signed, boolean bigEndian) { + + this((signed ? Encoding.PCM_SIGNED : Encoding.PCM_UNSIGNED), + sampleRate, + sampleSizeInBits, + channels, + (channels == NOT_SPECIFIED || sampleSizeInBits == NOT_SPECIFIED)? + NOT_SPECIFIED: + ((sampleSizeInBits + 7) / 8) * channels, + sampleRate, + bigEndian); + } + + /** + * Obtains the type of encoding for sounds in this format. + * + * @return the encoding type + * @see Encoding#PCM_SIGNED + * @see Encoding#PCM_UNSIGNED + * @see Encoding#ULAW + * @see Encoding#ALAW + */ + public Encoding getEncoding() { + + return encoding; + } + + /** + * Obtains the sample rate. + * For compressed formats, the return value is the sample rate of the uncompressed + * audio data. + * When this AudioFormat is used for queries capabilities , a sample rate of + * AudioSystem.NOT_SPECIFIED means that any sample rate is + * acceptable. AudioSystem.NOT_SPECIFIED is also returned when + * the sample rate is not defined for this audio format. + * @return the number of samples per second, + * or AudioSystem.NOT_SPECIFIED + * + * @see #getFrameRate() + */ + public float getSampleRate() { + + return sampleRate; + } + + /** + * Obtains the size of a sample. + * For compressed formats, the return value is the sample size of the + * uncompressed audio data. + * When this AudioFormat is used for queries or capabilities , a sample size of + * AudioSystem.NOT_SPECIFIED means that any sample size is + * acceptable. AudioSystem.NOT_SPECIFIED is also returned when + * the sample size is not defined for this audio format. + * @return the number of bits in each sample, + * or AudioSystem.NOT_SPECIFIED + * + * @see #getFrameSize() + */ + public int getSampleSizeInBits() { + + return sampleSizeInBits; + } + + /** + * Obtains the number of channels. 
+ * When this AudioFormat is used for queries or capabilities , a return value of + * AudioSystem.NOT_SPECIFIED means that any (positive) number of channels is + * acceptable. + * @return The number of channels (1 for mono, 2 for stereo, etc.), + * or AudioSystem.NOT_SPECIFIED + * + */ + public int getChannels() { + + return channels; + } + + /** + * Obtains the frame size in bytes. + * When this AudioFormat is used for queries or capabilities, a frame size of + * AudioSystem.NOT_SPECIFIED means that any frame size is + * acceptable. AudioSystem.NOT_SPECIFIED is also returned when + * the frame size is not defined for this audio format. + * @return the number of bytes per frame, + * or AudioSystem.NOT_SPECIFIED + * + * @see #getSampleSizeInBits() + */ + public int getFrameSize() { + + return frameSize; + } + + /** + * Obtains the frame rate in frames per second. + * When this AudioFormat is used for queries or capabilities , a frame rate of + * AudioSystem.NOT_SPECIFIED means that any frame rate is + * acceptable. AudioSystem.NOT_SPECIFIED is also returned when + * the frame rate is not defined for this audio format. + * @return the number of frames per second, + * or AudioSystem.NOT_SPECIFIED + * + * @see #getSampleRate() + */ + public float getFrameRate() { + + return frameRate; + } + + + /** + * Indicates whether the audio data is stored in big-endian or little-endian + * byte order. If the sample size is not more than one byte, the return value is + * irrelevant. + * @return true if the data is stored in big-endian byte order, + * false if little-endian + */ + public boolean isBigEndian() { + + return bigEndian; + } + + + /** + * Obtain an unmodifiable map of properties. + * The concept of properties is further explained in + * the. + * + * @return a Map<String,Object> object containing + * all properties. If no properties are recognized, an empty map is + * returned. + * + * @see #getProperty(String) + * @since 1.5 + */ + @SuppressWarnings("unchecked") + public Map properties() { + Map ret; + if (properties == null) { + ret = new HashMap(0); + } else { + ret = (Map) (properties.clone()); + } + return (Map) Collections.unmodifiableMap(ret); + } + + + /** + * Obtain the property value specified by the key. + * The concept of properties is further explained in + * the. + * + *

If the specified property is not defined for a + * particular file format, this method returns + * null. + * + * @param key the key of the desired property + * @return the value of the property with the specified key, + * or null if the property does not exist. + * + * @see #properties() + * @since 1.5 + */ + public Object getProperty(String key) { + if (properties == null) { + return null; + } + return properties.get(key); + } + + + /** + * Indicates whether this format matches the one specified. To match, + * two formats must have the same encoding, the same number of channels, + * and the same number of bits per sample and bytes per frame. + * The two formats must also have the same sample rate, + * unless the specified format has the sample rate value AudioSystem.NOT_SPECIFIED, + * which any sample rate will match. The frame rates must + * similarly be equal, unless the specified format has the frame rate + * value AudioSystem.NOT_SPECIFIED. The byte order (big-endian or little-endian) + * must match if the sample size is greater than one byte. + * + * @param format format to test for match + * @return true if this format matches the one specified, + * false otherwise. + */ + /* + * $$kk: 04.20.99: i changed the semantics of this. + */ + public boolean matches(TarsosDSPAudioFormat format) { + + return format.getEncoding().equals(getEncoding()) && + ((format.getSampleRate() == (float) NOT_SPECIFIED) || (format.getSampleRate() == getSampleRate())) && + (format.getSampleSizeInBits() == getSampleSizeInBits()) && + (format.getChannels() == getChannels() && + (format.getFrameSize() == getFrameSize()) && + ((format.getFrameRate() == (float) NOT_SPECIFIED) || (format.getFrameRate() == getFrameRate())) && + ((format.getSampleSizeInBits() <= 8) || (format.isBigEndian() == isBigEndian()))); + } + + + /** + * Returns a string that describes the format, such as: + * "PCM SIGNED 22050 Hz 16 bit mono big-endian". The contents of the string + * may vary between implementations of Java Sound. 
+ * + * @return a string that describes the format parameters + */ + public String toString() { + String sEncoding = ""; + if (getEncoding() != null) { + sEncoding = getEncoding().toString() + " "; + } + + String sSampleRate; + if (getSampleRate() == (float) NOT_SPECIFIED) { + sSampleRate = "unknown sample rate, "; + } else { + sSampleRate = getSampleRate() + " Hz, "; + } + + String sSampleSizeInBits; + if (getSampleSizeInBits() == (float) NOT_SPECIFIED) { + sSampleSizeInBits = "unknown bits per sample, "; + } else { + sSampleSizeInBits = getSampleSizeInBits() + " bit, "; + } + + String sChannels; + if (getChannels() == 1) { + sChannels = "mono, "; + } else + if (getChannels() == 2) { + sChannels = "stereo, "; + } else { + if (getChannels() == NOT_SPECIFIED) { + sChannels = " unknown number of channels, "; + } else { + sChannels = getChannels()+" channels, "; + } + } + + String sFrameSize; + if (getFrameSize() == (float) NOT_SPECIFIED) { + sFrameSize = "unknown frame size, "; + } else { + sFrameSize = getFrameSize()+ " bytes/frame, "; + } + + String sFrameRate = ""; + if (Math.abs(getSampleRate() - getFrameRate()) > 0.00001) { + if (getFrameRate() == (float) NOT_SPECIFIED) { + sFrameRate = "unknown frame rate, "; + } else { + sFrameRate = getFrameRate() + " frames/second, "; + } + } + + String sEndian = ""; + if ((getEncoding().equals(Encoding.PCM_SIGNED) + || getEncoding().equals(Encoding.PCM_UNSIGNED)) + && ((getSampleSizeInBits() > 8) + || (getSampleSizeInBits() == NOT_SPECIFIED))) { + if (isBigEndian()) { + sEndian = "big-endian"; + } else { + sEndian = "little-endian"; + } + } + + return sEncoding + + sSampleRate + + sSampleSizeInBits + + sChannels + + sFrameSize + + sFrameRate + + sEndian; + + } + + /** + * The Encoding class names the specific type of data representation + * used for an audio stream. The encoding includes aspects of the + * sound format other than the number of channels, sample rate, sample size, + * frame rate, frame size, and byte order. + *

+ * One ubiquitous type of audio encoding is pulse-code modulation (PCM), + * which is simply a linear (proportional) representation of the sound + * waveform. With PCM, the number stored in each sample is proportional + * to the instantaneous amplitude of the sound pressure at that point in + * time. The numbers are frequently signed or unsigned integers. + * Besides PCM, other encodings include mu-law and a-law, which are nonlinear + * mappings of the sound amplitude that are often used for recording speech. + *

+ * You can use a predefined encoding by referring to one of the static + * objects created by this class, such as PCM_SIGNED or + * PCM_UNSIGNED. Service providers can create new encodings, such as + * compressed audio formats or floating-point PCM samples, and make + * these available through the AudioSystem class. + *

+ * The Encoding class is static, so that all + * AudioFormat objects that have the same encoding will refer + * to the same object (rather than different instances of the same class). + * This allows matches to be made by checking that two format's encodings + * are equal. + * + * @author Kara Kytle + * @since 1.3 + */ + public static class Encoding { + + + // ENCODING DEFINES + + /** + * Specifies signed, linear PCM data. + */ + public static final Encoding PCM_SIGNED = new Encoding("PCM_SIGNED"); + + /** + * Specifies unsigned, linear PCM data. + */ + public static final Encoding PCM_UNSIGNED = new Encoding("PCM_UNSIGNED"); + + /** + * Specifies u-law encoded data. + */ + public static final Encoding ULAW = new Encoding("ULAW"); + + /** + * Specifies a-law encoded data. + */ + public static final Encoding ALAW = new Encoding("ALAW"); + + + // INSTANCE VARIABLES + + /** + * Encoding name. + */ + private final String name; + + + // CONSTRUCTOR + + /** + * Constructs a new encoding. + * @param name the name of the new type of encoding + */ + public Encoding(String name) { + this.name = name; + } + + + // METHODS + + /** + * Finalizes the equals method + */ + public final boolean equals(Object obj) { + if (toString() == null) { + return (obj != null) && (obj.toString() == null); + } + if (obj instanceof Encoding) { + return toString().equals(obj.toString()); + } + return false; + } + + /** + * Finalizes the hashCode method + */ + public final int hashCode() { + if (toString() == null) { + return 0; + } + return toString().hashCode(); + } + + /** + * Provides the String representation of the encoding. This String is + * the same name that was passed to the constructor. For the predefined encodings, the name + * is similar to the encoding's variable (field) name. For example, PCM_SIGNED.toString() returns + * the name "pcm_signed". + * + * @return the encoding name + */ + public final String toString() { + return name; + } + + } // class Encoding +} diff --git a/app/src/main/java/be/tarsos/dsp/io/TarsosDSPAudioInputStream.java b/app/src/main/java/be/tarsos/dsp/io/TarsosDSPAudioInputStream.java new file mode 100644 index 0000000..70491d2 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/io/TarsosDSPAudioInputStream.java @@ -0,0 +1,76 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +package be.tarsos.dsp.io; + +import java.io.IOException; + +/** + * Decouples the audio input stream + * @author Joren Six + */ +public interface TarsosDSPAudioInputStream { + + /** + * Skip a number of bytes before reading the remaining bytes. + * @param bytesToSkip The number of bytes to skip. + * @return The number of bytes skipped. 
+ * @throws IOException if an input or output error occurs on the underlying stream
+ * @see #read
+ */
+ long skip(long bytesToSkip) throws IOException;
+
+ /**
+ * Reads up to a specified maximum number of bytes of data from the audio
+ * stream, putting them into the given byte array.
+ *

This method will always read an integral number of frames. + * If len does not specify an integral number + * of frames, a maximum of len - (len % frameSize) + * bytes will be read. + * + * @param b the buffer into which the data is read + * @param off the offset, from the beginning of array b, at which + * the data will be written + * @param len the maximum number of bytes to read + * @return the total number of bytes read into the buffer, or -1 if there + * is no more data because the end of the stream has been reached + * @throws IOException if an input or output error occurs + * @see #skip + */ + int read(byte[] b, int off, int len) throws IOException ; + + /** + * Closes this audio input stream and releases any system resources associated + * with the stream. + * @throws IOException if an input or output error occurs + */ + void close() throws IOException; + + /** + * + * @return The format of the underlying audio + */ + TarsosDSPAudioFormat getFormat(); + + long getFrameLength(); +} diff --git a/app/src/main/java/be/tarsos/dsp/io/UniversalAudioInputStream.java b/app/src/main/java/be/tarsos/dsp/io/UniversalAudioInputStream.java new file mode 100644 index 0000000..daffa58 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/io/UniversalAudioInputStream.java @@ -0,0 +1,72 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. 
+* +*/ + +package be.tarsos.dsp.io; + +import java.io.IOException; +import java.io.InputStream; + +public class UniversalAudioInputStream implements TarsosDSPAudioInputStream { + + private final InputStream underlyingStream; + private final TarsosDSPAudioFormat format; + + public UniversalAudioInputStream(InputStream underlyingInputStream, TarsosDSPAudioFormat format){ + this.underlyingStream = underlyingInputStream; + this.format = format; + } + + @Override + public long skip(long bytesToSkip) throws IOException { + //the skip probably + int bytesSkipped = 0; + for(int i = 0 ; i < bytesToSkip ; i++){ + int theByte = underlyingStream.read(); + if(theByte!=-1){ + bytesSkipped++; + } + } + return bytesSkipped; + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + return underlyingStream.read(b, off, len); + } + + @Override + public void close() throws IOException { + underlyingStream.close(); + } + + @Override + public TarsosDSPAudioFormat getFormat() { + return format; + } + + @Override + public long getFrameLength() { + return -1; + } + +} diff --git a/app/src/main/java/be/tarsos/dsp/io/package-info.java b/app/src/main/java/be/tarsos/dsp/io/package-info.java new file mode 100644 index 0000000..84113c5 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/io/package-info.java @@ -0,0 +1,28 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +/** + * Contains an abstraction of audio I/O. This is needed to support both the Java runtime and Dalvik/Android. + */ +package be.tarsos.dsp.io; diff --git a/app/src/main/java/be/tarsos/dsp/mfcc/DCT.java b/app/src/main/java/be/tarsos/dsp/mfcc/DCT.java new file mode 100644 index 0000000..9c301b9 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/mfcc/DCT.java @@ -0,0 +1,155 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +package be.tarsos.dsp.mfcc; + +import java.io.*; +import java.util.StringTokenizer; + +public class DCT { + + int[][] f; + int[][] g; + int[][] inv; + + static public void main(String[] args) { + + int[][] fm = new int[8][8]; + + if ( args.length != 1 ) { + System.out.println("usage: java DCT "); + return; + } + + File f = new File(args[0]); + if ( !f.canRead() ) { + System.out.println("Error! 
can't open "+args[0]+" for reading"); + return; + } + try { + @SuppressWarnings("resource") + BufferedReader br = new BufferedReader(new FileReader(f)); + for ( int i = 0; i < 8; i++ ) { + String line = br.readLine(); + StringTokenizer tok = new StringTokenizer(line,", "); + if ( tok.countTokens() != 8 ) { + System.out.println("Error! File format error: 8 tokens required!"); + throw new IOException("Error"); + } + for ( int j = 0; j < 8; j++ ) { + String numstr = tok.nextToken(); + int num = Integer.parseInt(numstr); + fm[i][j] = num; + } + } + br.close(); + } + catch ( FileNotFoundException e ) { + System.out.println("Error! can't create FileReader for "+args[0]); + return; + } + catch ( IOException e ) { + System.out.println("Error! during read of "+args[0]); + return; + } + catch ( NumberFormatException e ) { + System.out.println("Error! NumberFormatExecption"); + return; + } + + DCT dct = new DCT(fm); + dct.transform(); + dct.printout(); + dct.inverse(); + dct.printoutinv(); + } + + public DCT(int[][] f) { + this.f = f; + } + + public void transform() { + g = new int[8][8]; + + for ( int i = 0; i < 8; i++ ) { + for ( int j = 0; j < 8; j++ ) { + double ge = 0.0; + for ( int x = 0; x < 8; x++ ) { + for ( int y = 0; y < 8; y++ ) { + double cg1 = (2.0*(double)x+1.0)*(double)i*Math.PI/16.0; + double cg2 = (2.0*(double)y+1.0)*(double)j*Math.PI/16.0; + + ge += ((double)f[x][y]) * Math.cos(cg1) * Math.cos(cg2); + + } + } + double ci = ((i==0)?1.0/Math.sqrt(2.0):1.0); + double cj = ((j==0)?1.0/Math.sqrt(2.0):1.0); + ge *= ci * cj * 0.25; + g[i][j] = (int)Math.round(ge); + } + } + } + + + public void inverse() { + inv = new int[8][8]; + + for ( int x = 0; x < 8; x++ ) { + for ( int y = 0; y < 8; y++ ) { + double ge = 0.0; + for ( int i = 0; i < 8; i++ ) { + double cg1 = (2.0*(double)x + 1.0)*(double)i*Math.PI/16.0; + double ci = ((i==0)?1.0/Math.sqrt(2.0):1.0); + for ( int j = 0; j < 8; j++ ) { + double cg2 = (2.0*(double)y + 1.0)*(double)j*Math.PI/16.0; + double cj = ((j==0)?1.0/Math.sqrt(2.0):1.0); + double cij4 = ci*cj*0.25; + ge += cij4 * Math.cos(cg1) * Math.cos(cg2) * (double)g[i][j]; + } + } + inv[x][y] = (int)Math.round(ge); + } + } + } + + public void printout() { + for ( int i = 0; i < 8; i++ ) { + System.out.print("\n"); + for ( int k = 0; k < 8; k++ ) { + System.out.print(g[i][k]+" "); + } + } + } + + public void printoutinv() { + for ( int i = 0; i < 8; i++ ) { + System.out.print("\n"); + for ( int k = 0; k < 8; k++ ) { + System.out.print(inv[i][k]+" "); + } + } + } +} + + diff --git a/app/src/main/java/be/tarsos/dsp/mfcc/MFCC.java b/app/src/main/java/be/tarsos/dsp/mfcc/MFCC.java new file mode 100644 index 0000000..61444b3 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/mfcc/MFCC.java @@ -0,0 +1,268 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. 
+* +*/ + +package be.tarsos.dsp.mfcc; + +import be.tarsos.dsp.AudioEvent; +import be.tarsos.dsp.AudioProcessor; +import be.tarsos.dsp.util.fft.FFT; +import be.tarsos.dsp.util.fft.HammingWindow; + + +public class MFCC implements AudioProcessor { + + private final int amountOfCepstrumCoef; //Number of MFCCs per frame + protected int amountOfMelFilters; //Number of mel filters (SPHINX-III uses 40) + protected float lowerFilterFreq; //lower limit of filter (or 64 Hz?) + protected float upperFilterFreq; //upper limit of filter (or half of sampling freq.?) + + float[] audioFloatBuffer; + //Er zijn evenveel mfccs als er frames zijn!? + //Per frame zijn er dan CEPSTRA coeficienten + private float[] mfcc; + + int[] centerFrequencies; + + private final FFT fft; + private final int samplesPerFrame; + private final float sampleRate; + + public MFCC(int samplesPerFrame, int sampleRate){ + this(samplesPerFrame, sampleRate, 30, 30, 133.3334f, ((float)sampleRate)/2f); + } + + public MFCC(int samplesPerFrame, float sampleRate, int amountOfCepstrumCoef, int amountOfMelFilters, float lowerFilterFreq, float upperFilterFreq) { + this.samplesPerFrame = samplesPerFrame; + this.sampleRate = sampleRate; + this.amountOfCepstrumCoef = amountOfCepstrumCoef; + this.amountOfMelFilters = amountOfMelFilters; + this.fft = new FFT(samplesPerFrame, new HammingWindow()); + + this.lowerFilterFreq = Math.max(lowerFilterFreq, 25); + this.upperFilterFreq = Math.min(upperFilterFreq, sampleRate / 2); + calculateFilterBanks(); + } + + @Override + public boolean process(AudioEvent audioEvent) { + audioFloatBuffer = audioEvent.getFloatBuffer().clone(); + + // Magnitude Spectrum + float[] bin = magnitudeSpectrum(audioFloatBuffer); + // get Mel Filterbank + float[] fbank = melFilter(bin, centerFrequencies); + // Non-linear transformation + float[] f = nonLinearTransformation(fbank); + // Cepstral coefficients + mfcc = cepCoefficients(f); + + return true; + } + + @Override + public void processingFinished() { + + } + + /** + * computes the magnitude spectrum of the input frame
+ * calls: none
+ * called by: featureExtraction + * @param frame Input frame signal + * @return Magnitude Spectrum array + */ + public float[] magnitudeSpectrum(float[] frame){ + float[] magSpectrum = new float[frame.length]; + + // calculate FFT for current frame + + fft.forwardTransform(frame); + + // calculate magnitude spectrum + for (int k = 0; k < frame.length/2; k++){ + magSpectrum[frame.length/2+k] = fft.modulus(frame, frame.length/2-1-k); + magSpectrum[frame.length/2-1-k] = magSpectrum[frame.length/2+k]; + } + + return magSpectrum; + } + + /** + * calculates the FFT bin indices
calls: none
called by: + * featureExtraction + * + */ + + public final void calculateFilterBanks() { + centerFrequencies = new int[amountOfMelFilters + 2]; + + centerFrequencies[0] = Math.round(lowerFilterFreq / sampleRate * samplesPerFrame); + centerFrequencies[centerFrequencies.length - 1] = (int) (samplesPerFrame / 2); + + double[] mel = new double[2]; + mel[0] = freqToMel(lowerFilterFreq); + mel[1] = freqToMel(upperFilterFreq); + + float factor = (float)((mel[1] - mel[0]) / (amountOfMelFilters + 1)); + //Calculates te centerfrequencies. + for (int i = 1; i <= amountOfMelFilters; i++) { + float fc = (inverseMel(mel[0] + factor * i) / sampleRate) * samplesPerFrame; + centerFrequencies[i] = Math.round(fc); + } + + } + + + /** + * the output of mel filtering is subjected to a logarithm function (natural logarithm)
+ * calls: none
+ * called by: featureExtraction + * @param fbank Output of mel filtering + * @return Natural log of the output of mel filtering + */ + public float[] nonLinearTransformation(float[] fbank){ + float[] f = new float[fbank.length]; + final float FLOOR = -50; + + for (int i = 0; i < fbank.length; i++){ + f[i] = (float) Math.log(fbank[i]); + + // check if ln() returns a value less than the floor + if (f[i] < FLOOR) f[i] = FLOOR; + } + + return f; + } + + /** + * Calculate the output of the mel filter
calls: none called by: + * featureExtraction + * @param bin The bins. + * @param centerFrequencies The frequency centers. + * @return Output of mel filter. + */ + public float[] melFilter(float[] bin, int[] centerFrequencies) { + float[] temp = new float[amountOfMelFilters + 2]; + + for (int k = 1; k <= amountOfMelFilters; k++) { + float num1 = 0, num2 = 0; + + float den = (centerFrequencies[k] - centerFrequencies[k - 1] + 1); + + for (int i = centerFrequencies[k - 1]; i <= centerFrequencies[k]; i++) { + num1 += bin[i] * (i - centerFrequencies[k - 1] + 1); + } + num1 /= den; + + den = (centerFrequencies[k + 1] - centerFrequencies[k] + 1); + + for (int i = centerFrequencies[k] + 1; i <= centerFrequencies[k + 1]; i++) { + num2 += bin[i] * (1 - ((i - centerFrequencies[k]) / den)); + } + + temp[k] = num1 + num2; + } + + float[] fbank = new float[amountOfMelFilters]; + + System.arraycopy(temp, 1, fbank, 0, amountOfMelFilters); + + return fbank; + } + + + /** + * Cepstral coefficients are calculated from the output of the Non-linear Transformation method
+ * calls: none
+ * called by: featureExtraction + * @param f Output of the Non-linear Transformation method + * @return Cepstral Coefficients + */ + public float[] cepCoefficients(float[] f){ + float[] cepc = new float[amountOfCepstrumCoef]; + + for (int i = 0; i < cepc.length; i++){ + for (int j = 0; j < f.length; j++){ + cepc[i] += f[j] * Math.cos(Math.PI * i / f.length * (j + 0.5)); + } + } + + return cepc; + } + +// /** +// * calculates center frequency
+// * calls: none
+// * called by: featureExtraction +// * @param i Index of mel filters +// * @return Center Frequency +// */ +// private static float centerFreq(int i,float samplingRate){ +// double mel[] = new double[2]; +// mel[0] = freqToMel(lowerFilterFreq); +// mel[1] = freqToMel(samplingRate / 2); +// +// // take inverse mel of: +// double temp = mel[0] + ((mel[1] - mel[0]) / (amountOfMelFilters + 1)) * i; +// return inverseMel(temp); +// } + + /** + * convert frequency to mel-frequency
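+ * For example, with the formula used here (mel = 2595 * log10(1 + f / 700)),
+ * 440 Hz maps to roughly 550 mel and 1000 Hz to roughly 1000 mel.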
+ * calls: none
+ * called by: featureExtraction + * @param freq Frequency + * @return Mel-Frequency + */ + protected static float freqToMel(float freq){ + return (float) (2595 * log10(1 + freq / 700)); + } + + /** + * calculates the inverse of Mel Frequency
+ * calls: none
+ * called by: featureExtraction + */ + private static float inverseMel(double x) { + return (float) (700 * (Math.pow(10, x / 2595) - 1)); + } + + /** + * calculates logarithm with base 10
+ * calls: none
+ * called by: featureExtraction + * @param value Number to take the log of + * @return base 10 logarithm of the input values + */ + protected static float log10(float value){ + return (float) (Math.log(value) / Math.log(10)); + } + + public float[] getMFCC() { + return mfcc.clone(); + } + + public int[] getCenterFrequencies() { + return centerFrequencies; + } +} diff --git a/app/src/main/java/be/tarsos/dsp/mfcc/package-info.java b/app/src/main/java/be/tarsos/dsp/mfcc/package-info.java new file mode 100644 index 0000000..7246def --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/mfcc/package-info.java @@ -0,0 +1,28 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +/** + * Contains an MFCC implementation. + */ +package be.tarsos.dsp.mfcc; diff --git a/app/src/main/java/be/tarsos/dsp/onsets/BeatRootSpectralFluxOnsetDetector.java b/app/src/main/java/be/tarsos/dsp/onsets/BeatRootSpectralFluxOnsetDetector.java new file mode 100644 index 0000000..0cd96cb --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/onsets/BeatRootSpectralFluxOnsetDetector.java @@ -0,0 +1,279 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +package be.tarsos.dsp.onsets; + +import java.util.Arrays; +import java.util.Iterator; +import java.util.LinkedList; + +import be.tarsos.dsp.AudioDispatcher; +import be.tarsos.dsp.AudioEvent; +import be.tarsos.dsp.AudioProcessor; +import be.tarsos.dsp.beatroot.Peaks; +import be.tarsos.dsp.util.fft.FFT; +import be.tarsos.dsp.util.fft.ScaledHammingWindow; + +/** + *

+ * A non real-time spectral flux onset detection method, as implemented in the
+ * BeatRoot system of the Centre for Digital Music, Queen Mary, University of
+ * London.
+ *
+ * This onset detection function does not work in real-time: it analyzes an
+ * audio stream and detects onsets in a post-processing step.
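+ *
+ * In practice the detector is registered on a dispatcher like any other
+ * {@link AudioProcessor}, but onsets are only handed to the registered
+ * {@link OnsetHandler} when {@code processingFinished()} runs, i.e. after the
+ * whole stream has been processed.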

+ * + * @author Joren Six + * @author Simon Dixon + */ +public class BeatRootSpectralFluxOnsetDetector implements AudioProcessor, OnsetDetector { + /** RMS amplitude of the current frame. */ + private double frameRMS; + + /** The number of overlapping frames of audio data which have been read. */ + private int frameCount; + + /** Long term average frame energy (in frequency domain representation). */ + private double ltAverage; + + /** The real part of the data for the in-place FFT computation. + * Since input data is real, this initially contains the input data. */ + private float[] reBuffer; + + /** The imaginary part of the data for the in-place FFT computation. + * Since input data is real, this initially contains zeros. */ + private final float[] imBuffer; + + /** Spectral flux onset detection function, indexed by frame. */ + private final double[] spectralFlux; + + /** A mapping function for mapping FFT bins to final frequency bins. + * The mapping is linear (1-1) until the resolution reaches 2 points per + * semitone, then logarithmic with a semitone resolution. e.g. for + * 44.1kHz sampling rate and fftSize of 2048 (46ms), bin spacing is + * 21.5Hz, which is mapped linearly for bins 0-34 (0 to 732Hz), and + * logarithmically for the remaining bins (midi notes 79 to 127, bins 35 to + * 83), where all energy above note 127 is mapped into the final bin. */ + private int[] freqMap; + + /** The number of entries in freqMap. Note that the length of + * the array is greater, because its size is not known at creation time. */ + private int freqMapSize; + + /** The magnitude spectrum of the most recent frame. + * Used for calculating the spectral flux. */ + private float[] prevFrame; + + /** The magnitude spectrum of the current frame. */ + private final double[] newFrame; + + /** The magnitude spectra of all frames, used for plotting the spectrogram. */ + private final double[][] frames; + + /** The RMS energy of all frames. */ + private final double[] energy; + + /** Spacing of audio frames in samples (see hopTime) */ + protected int hopSize; + + /** The size of an FFT frame in samples (see fftTime) */ + protected int fftSize; + + /** Total number of audio frames if known, or -1 for live or compressed input. */ + private final int totalFrames; + + /** RMS frame energy below this value results in the frame being set to zero, + * so that normalization does not have undesired side-effects. */ + public static double silenceThreshold = 0.0004; + + /** For dynamic range compression, this value is added to the log magnitude + * in each frequency bin and any remaining negative values are then set to zero. + */ + public static double rangeThreshold = 10; + + /** Determines method of normalization. Values can be:
    + *
+ * <ul>
+ * <li>0: no normalization</li>
+ * <li>1: normalization by current frame energy</li>
+ * <li>2: normalization by exponential average of frame energy</li>
+ * </ul>
+ */ + public static int normaliseMode = 2; + + /** Ratio between rate of sampling the signal energy (for the amplitude envelope) and the hop size */ + public static int energyOversampleFactor = 2; + + private OnsetHandler handler; + + private final double hopTime; + + private final FFT fft; + + /** + * Create anew onset detector + * @param d the dispatcher + * @param fftSize The size of the fft + * @param hopSize the hop size of audio blocks. + */ + public BeatRootSpectralFluxOnsetDetector(AudioDispatcher d,int fftSize, int hopSize){ + + this.hopSize = hopSize; + this.hopTime = hopSize/d.getFormat().getSampleRate(); + this.fftSize = fftSize; + + System.err.println("Please use the ComplexOnset detector: BeatRootSpectralFluxOnsetDetector does currenlty not support streaming"); + //no overlap + //FIXME: + int durationInFrames = -1000; + totalFrames = (int)(durationInFrames / hopSize) + 4; + energy = new double[totalFrames*energyOversampleFactor]; + spectralFlux = new double[totalFrames]; + + reBuffer = new float[fftSize/2]; + imBuffer = new float[fftSize/2]; + prevFrame = new float[fftSize/2]; + + makeFreqMap(fftSize, d.getFormat().getSampleRate()); + + newFrame = new double[freqMapSize]; + frames = new double[totalFrames][freqMapSize]; + handler = new PrintOnsetHandler(); + fft = new FFT(fftSize,new ScaledHammingWindow()); + } + + @Override + public boolean process(AudioEvent audioEvent) { + frameRMS = audioEvent.getRMS()/2.0; + + float[] audioBuffer = audioEvent.getFloatBuffer().clone(); + + Arrays.fill(imBuffer, 0); + fft.powerPhaseFFTBeatRootOnset(audioBuffer, reBuffer, imBuffer); + Arrays.fill(newFrame, 0); + + double flux = 0; + for (int i = 0; i < fftSize/2; i++) { + if (reBuffer[i] > prevFrame[i]) + flux += reBuffer[i] - prevFrame[i]; + newFrame[freqMap[i]] += reBuffer[i]; + } + spectralFlux[frameCount] = flux; + if (freqMapSize >= 0) System.arraycopy(newFrame, 0, frames[frameCount], 0, freqMapSize); + + int sz = (fftSize - hopSize) / energyOversampleFactor; + int index = hopSize; + for (int j = 0; j < energyOversampleFactor; j++) { + double newEnergy = 0; + for (int i = 0; i < sz; i++) { + newEnergy += audioBuffer[index] * audioBuffer[index]; + if (++index == fftSize) + index = 0; + } + energy[frameCount * energyOversampleFactor + j] = + newEnergy / sz <= 1e-6? 0: Math.log(newEnergy / sz) + 13.816; + } + double decay = frameCount >= 200? 0.99: + (frameCount < 100? 0: (frameCount - 100) / 100.0); + if (ltAverage == 0) + ltAverage = frameRMS; + else + ltAverage = ltAverage * decay + frameRMS * (1.0 - decay); + if (frameRMS <= silenceThreshold) + for (int i = 0; i < freqMapSize; i++) + frames[frameCount][i] = 0; + else { + if (normaliseMode == 1) + for (int i = 0; i < freqMapSize; i++) + frames[frameCount][i] /= frameRMS; + else if (normaliseMode == 2) + for (int i = 0; i < freqMapSize; i++) + frames[frameCount][i] /= ltAverage; + for (int i = 0; i < freqMapSize; i++) { + frames[frameCount][i] = Math.log(frames[frameCount][i]) + rangeThreshold; + if (frames[frameCount][i] < 0) + frames[frameCount][i] = 0; + } + } + + float[] tmp = prevFrame; + prevFrame = reBuffer; + reBuffer = tmp; + frameCount++; + return true; + } + + /** + * Creates a map of FFT frequency bins to comparison bins. + * Where the spacing of FFT bins is less than 0.5 semitones, the mapping is + * one to one. Where the spacing is greater than 0.5 semitones, the FFT + * energy is mapped into semitone-wide bins. No scaling is performed; that + * is the energy is summed into the comparison bins. 
See also + * processFrame() + */ + protected void makeFreqMap(int fftSize, float sampleRate) { + freqMap = new int[fftSize/2+1]; + double binWidth = sampleRate / fftSize; + int crossoverBin = (int)(2 / (Math.pow(2, 1/12.0) - 1)); + int crossoverMidi = (int)Math.round(Math.log(crossoverBin*binWidth/440)/ + Math.log(2) * 12 + 69); + // freq = 440 * Math.pow(2, (midi-69)/12.0) / binWidth; + int i = 0; + while (i <= crossoverBin) + freqMap[i++] = i; + while (i <= fftSize/2) { + double midi = Math.log(i*binWidth/440) / Math.log(2) * 12 + 69; + if (midi > 127) + midi = 127; + freqMap[i++] = crossoverBin + (int)Math.round(midi) - crossoverMidi; + } + freqMapSize = freqMap[i-1] + 1; + } // makeFreqMap() + + + private void findOnsets(double p1, double p2){ + LinkedList peaks = Peaks.findPeaks(spectralFlux, (int)Math.round(0.06 / hopTime), p1, p2, true); + Iterator it = peaks.iterator(); + + double minSalience = Peaks.min(spectralFlux); + for (int i = 0; i < peaks.size(); i++) { + int index = it.next(); + double time = index * hopTime; + double salience = spectralFlux[index] - minSalience; + handler.handleOnset(time,salience); + } + } + + public void setHandler(OnsetHandler handler) { + this.handler = handler; + } + + @Override + public void processingFinished() { + double p1 = 0.35; + double p2 = 0.84; + Peaks.normalise(spectralFlux); + findOnsets(p1, p2); + } +} diff --git a/app/src/main/java/be/tarsos/dsp/onsets/ComplexOnsetDetector.java b/app/src/main/java/be/tarsos/dsp/onsets/ComplexOnsetDetector.java new file mode 100644 index 0000000..e8d3bab --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/onsets/ComplexOnsetDetector.java @@ -0,0 +1,215 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +package be.tarsos.dsp.onsets; + +import be.tarsos.dsp.AudioEvent; +import be.tarsos.dsp.AudioProcessor; +import be.tarsos.dsp.util.PeakPicker; +import be.tarsos.dsp.util.fft.FFT; +import be.tarsos.dsp.util.fft.HannWindow; + +/** + * A complex Domain Method onset detection function + * + * Christopher Duxbury, Mike E. Davies, and Mark B. Sandler. Complex domain + * onset detection for musical signals. In Proceedings of the Digital Audio + * Effects Conference, DAFx-03, pages 90-93, London, UK, 2003 + * + * The implementation is a translation of onset.c from Aubio, Copyright (C) + * 2003-2009 Paul Brossier: piem@aubio.org + * + * @author Joren Six + * @author Paul Brossiers + */ +public class ComplexOnsetDetector implements AudioProcessor, OnsetDetector{ + + + /** + * The threshold to define silence, in dbSPL. + */ + private final double silenceThreshold; + + /** + * The minimum IOI (inter onset interval), in seconds. + */ + private final double minimumInterOnsetInterval; + + /** + * The last detected onset, in seconds. + */ + private double lastOnset; + + /** + * The last detected onset value. 
+ */ + private double lastOnsetValue; + + private final PeakPicker peakPicker; + + private OnsetHandler handler; + + + /** + * To calculate the FFT. + */ + private final FFT fft; + + /** + * Previous phase vector, one frame behind + */ + private final float[] theta1; + /** + * Previous phase vector, two frames behind + */ + private final float[] theta2; + + /** + * Previous norm (power, magnitude) vector + */ + private final float[] oldmag; + + /** + * Current onset detection measure vector + */ + private final float[] dev1; + + /** + * + * @param fftSize The size of the fft to take (e.g. 512) + * @param peakThreshold A threshold used for peak picking. Values between 0.1 and 0.8. Default is 0.3, if too many onsets are detected adjust to 0.4 or 0.5. + * @param silenceThreshold The threshold that defines when a buffer is silent. Default is -70dBSPL. -90 is also used. + * @param minimumInterOnsetInterval The minimum inter-onset-interval in seconds. When two onsets are detected within this interval the last one does not count. Default is 0.004 seconds. + */ + public ComplexOnsetDetector(int fftSize,double peakThreshold,double minimumInterOnsetInterval,double silenceThreshold){ + fft = new FFT(fftSize,new HannWindow()); + this.silenceThreshold = silenceThreshold; + this.minimumInterOnsetInterval = minimumInterOnsetInterval; + + peakPicker = new PeakPicker(peakThreshold); + + int rsize = fftSize/2+1; + oldmag = new float[rsize]; + dev1 = new float[rsize]; + theta1 = new float[rsize]; + theta2 = new float[rsize]; + + handler = new PrintOnsetHandler(); + } + + /** + * Create a new detector + * @param fftSize the size of the fft should be related to the audio block size. + */ + public ComplexOnsetDetector(int fftSize){ + this(fftSize,0.3); + } + + /** + * Create a new detector + * @param fftSize the size of the fft should be related to the audio block size. + * @param peakThreshold the threshold when a peak is accepted. + */ + public ComplexOnsetDetector(int fftSize,double peakThreshold){ + this(fftSize,peakThreshold,0.03); + } + + /** + * Create a new detector + * @param fftSize the size of the fft should be related to the audio block size. + * @param peakThreshold the threshold when a peak is accepted. + * @param minimumInterOnsetInterval The minimum interval between onsets in seconds. + */ + public ComplexOnsetDetector(int fftSize,double peakThreshold,double minimumInterOnsetInterval){ + this(fftSize,peakThreshold,minimumInterOnsetInterval,-70.0); + } + + @Override + public boolean process(AudioEvent audioEvent) { + onsetDetection(audioEvent); + return true; + } + + + private void onsetDetection(AudioEvent audioEvent){ + //calculate the complex fft (the magnitude and phase) + float[] data = audioEvent.getFloatBuffer().clone(); + float[] power = new float[data.length/2]; + float[] phase = new float[data.length/2]; + fft.powerPhaseFFT(data, power, phase); + + float onsetValue = 0; + + for(int j = 0 ; j < power.length ; j++){ + //int imgIndex = (power.length - 1) * 2 - j; + + // compute the predicted phase + dev1[j] = 2.f * theta1[j] - theta2[j]; + + // compute the euclidean distance in the complex domain + // sqrt ( r_1^2 + r_2^2 - 2 * r_1 * r_2 * \cos ( \phi_1 - \phi_2 ) ) + onsetValue += Math.sqrt(Math.abs(Math.pow(oldmag[j],2) + Math.pow(power[j],2) - 2. 
* oldmag[j] *power[j] * Math.cos(dev1[j] - phase[j]))); + + /* swap old phase data (need to remember 2 frames behind)*/ + theta2[j] = theta1[j]; + theta1[j] = phase[j]; + + /* swap old magnitude data (1 frame is enough) */ + oldmag[j]= power[j]; + } + + lastOnsetValue = onsetValue; + + + boolean isOnset = peakPicker.pickPeak(onsetValue); + if(isOnset){ + if(audioEvent.isSilence(silenceThreshold)){ + isOnset = false; + } else { + double delay = ((audioEvent.getOverlap() * 4.3 ))/ audioEvent.getSampleRate(); + double onsetTime = audioEvent.getTimeStamp() - delay; + if(onsetTime - lastOnset > minimumInterOnsetInterval){ + handler.handleOnset(onsetTime,peakPicker.getLastPeekValue()); + lastOnset = onsetTime; + } + } + } + } + + public void setHandler(OnsetHandler handler) { + this.handler = handler; + } + + /** + * Set a new threshold for detected peaks. + * @param threshold A new threshold. + */ + public void setThreshold(double threshold){ + this.peakPicker.setThreshold(threshold); + } + + @Override + public void processingFinished() { + + } +} diff --git a/app/src/main/java/be/tarsos/dsp/onsets/OnsetDetector.java b/app/src/main/java/be/tarsos/dsp/onsets/OnsetDetector.java new file mode 100644 index 0000000..db34241 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/onsets/OnsetDetector.java @@ -0,0 +1,36 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +package be.tarsos.dsp.onsets; + +/** + * Each onset detector adheres to this interface: it is possible to add handlers to. + */ +public interface OnsetDetector { + + /** + * Add a handler to the onset detector + * @param handler the handler to react to onsets. + */ + void setHandler(OnsetHandler handler); +} diff --git a/app/src/main/java/be/tarsos/dsp/onsets/OnsetHandler.java b/app/src/main/java/be/tarsos/dsp/onsets/OnsetHandler.java new file mode 100644 index 0000000..194a582 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/onsets/OnsetHandler.java @@ -0,0 +1,36 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +package be.tarsos.dsp.onsets; + +/** + * A handler to react on percussive onsets. 
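+ * Since this is a single-method interface, it can be implemented with a
+ * lambda, for example
+ * {@code detector.setHandler((time, salience) -> System.out.println(time));}
+ * where {@code detector} is any {@link OnsetDetector} implementation.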
+ */ +public interface OnsetHandler{ + /** + * Handle a new onset + * @param time at this time (s) + * @param salience with this salience (acco + */ + void handleOnset(double time, double salience); +} diff --git a/app/src/main/java/be/tarsos/dsp/onsets/PercussionOnsetDetector.java b/app/src/main/java/be/tarsos/dsp/onsets/PercussionOnsetDetector.java new file mode 100644 index 0000000..c172282 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/onsets/PercussionOnsetDetector.java @@ -0,0 +1,196 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +package be.tarsos.dsp.onsets; + +import be.tarsos.dsp.AudioEvent; +import be.tarsos.dsp.AudioProcessor; +import be.tarsos.dsp.util.fft.FFT; + +/** + *

+ * Estimates the locations of percussive onsets using a simple method described + * in "Drum Source Separation using Percussive Feature Detection and Spectral Modulation" + * by Dan Barry, Derry Fitzgerald, Eugene Coyle and Bob Lawlor, + * ISSC 2005. + *
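+ *
+ * A minimal usage sketch; the dispatcher is created elsewhere and the buffer
+ * size is illustrative, not prescriptive:
+ * <pre>{@code
+ * OnsetHandler printOnsets = (time, salience) -> System.out.println(time);
+ * AudioProcessor onsets = new PercussionOnsetDetector(44100f, 1024, 0, printOnsets);
+ * dispatcher.addAudioProcessor(onsets);
+ * }</pre>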

+ *

+ * Implementation based on a VAMP plugin by Chris Cannam at Queen Mary, London: + *

+ * + *
+ *  Centre for Digital Music, Queen Mary, University of London.
+ *  Copyright 2006 Chris Cannam.
+ *    
+ *  Permission is hereby granted, free of charge, to any person
+ *  obtaining a copy of this software and associated documentation
+ *  files (the "Software"), to deal in the Software without
+ *  restriction, including without limitation the rights to use, copy,
+ *  modify, merge, publish, distribute, sublicense, and/or sell copies
+ *  of the Software, and to permit persons to whom the Software is
+ *  furnished to do so, subject to the following conditions:
+ *  
+ *  The above copyright notice and this permission notice shall be
+ *  included in all copies or substantial portions of the Software.
+ *  
+ *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ *  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR
+ *  ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+ *  CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ *  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *  
+ *  Except as contained in this notice, the names of the Centre for
+ *  Digital Music; Queen Mary, University of London; and Chris Cannam
+ *  shall not be used in advertising or otherwise to promote the sale,
+ *  use or other dealings in this Software without prior written
+ *  authorization.
+ * 
+ * + * + * + * @author Joren Six + * @author Chris Cannam + * @see "Drum Source Separation using Percussive Feature Detection and Spectral Modulation" + * @see VAMP plugin example + */ +public class PercussionOnsetDetector implements AudioProcessor, OnsetDetector { + + public static final double DEFAULT_THRESHOLD = 8; + + public static final double DEFAULT_SENSITIVITY = 20; + + private final FFT fft; + + private final float[] priorMagnitudes; + private final float[] currentMagnitudes; + + private float dfMinus1, dfMinus2; + + private OnsetHandler handler; + + private final float sampleRate;//samples per second (Hz) + private long processedSamples;//in samples + + /** + * Sensitivity of peak detector applied to broadband detection function (%). + * In [0-100]. + */ + private final double sensitivity; + + /** + * Energy rise within a frequency bin necessary to count toward broadband + * total (dB). In [0-20]. + * + */ + private final double threshold; + + /** + * Create a new percussion onset detector. With a default sensitivity and threshold. + * + * @param sampleRate + * The sample rate in Hz (used to calculate timestamps) + * @param bufferSize + * The size of the buffer in samples. + * @param bufferOverlap + * The overlap of buffers in samples. + * @param handler + * An interface implementor to handle percussion onset events. + */ + public PercussionOnsetDetector(float sampleRate, int bufferSize, + int bufferOverlap, OnsetHandler handler) { + this(sampleRate, bufferSize, handler, + DEFAULT_SENSITIVITY, DEFAULT_THRESHOLD); + } + + /** + * Create a new percussion onset detector. + * + * @param sampleRate + * The sample rate in Hz (used to calculate timestamps) + * @param bufferSize + * The size of the buffer in samples. + * @param handler + * An interface implementor to handle percussion onset events. + * @param sensitivity + * Sensitivity of the peak detector applied to broadband + * detection function (%). In [0-100]. + * @param threshold + * Energy rise within a frequency bin necessary to count toward + * broadband total (dB). In [0-20]. 
+ */ + public PercussionOnsetDetector(float sampleRate, int bufferSize, OnsetHandler handler, double sensitivity, double threshold) { + fft = new FFT(bufferSize / 2); + this.threshold = threshold; + this.sensitivity = sensitivity; + priorMagnitudes = new float[bufferSize / 2]; + currentMagnitudes = new float[bufferSize / 2]; + this.handler = handler; + this.sampleRate = sampleRate; + + } + + @Override + public boolean process(AudioEvent audioEvent) { + float[] audioFloatBuffer = audioEvent.getFloatBuffer(); + this.processedSamples += audioFloatBuffer.length; + this.processedSamples -= audioEvent.getOverlap(); + + fft.forwardTransform(audioFloatBuffer); + fft.modulus(audioFloatBuffer, currentMagnitudes); + int binsOverThreshold = 0; + for (int i = 0; i < currentMagnitudes.length; i++) { + if (priorMagnitudes[i] > 0.f) { + double diff = 10 * Math.log10(currentMagnitudes[i] + / priorMagnitudes[i]); + if (diff >= threshold) { + binsOverThreshold++; + } + } + priorMagnitudes[i] = currentMagnitudes[i]; + } + + if (dfMinus2 < dfMinus1 + && dfMinus1 >= binsOverThreshold + && dfMinus1 > ((100 - sensitivity) * audioFloatBuffer.length) / 200) { + float timeStamp = processedSamples / sampleRate; + handler.handleOnset(timeStamp,-1); + } + + dfMinus2 = dfMinus1; + dfMinus1 = binsOverThreshold; + + return true; + } + + @Override + public void processingFinished() { + } + + @Override + public void setHandler(OnsetHandler handler) { + this.handler = handler; + } +} diff --git a/app/src/main/java/be/tarsos/dsp/onsets/PrintOnsetHandler.java b/app/src/main/java/be/tarsos/dsp/onsets/PrintOnsetHandler.java new file mode 100644 index 0000000..165d99b --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/onsets/PrintOnsetHandler.java @@ -0,0 +1,31 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. 
+* +*/ + +package be.tarsos.dsp.onsets; + +public class PrintOnsetHandler implements OnsetHandler{ + @Override + public void handleOnset(double time, double salience) { + System.out.printf("%.4f;%.4f%n", time,salience); + } +} diff --git a/app/src/main/java/be/tarsos/dsp/onsets/package-info.java b/app/src/main/java/be/tarsos/dsp/onsets/package-info.java new file mode 100644 index 0000000..736ca4a --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/onsets/package-info.java @@ -0,0 +1,28 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +/** + * Contains various onset detection methods. + */ +package be.tarsos.dsp.onsets; diff --git a/app/src/main/java/be/tarsos/dsp/package-info.java b/app/src/main/java/be/tarsos/dsp/package-info.java new file mode 100644 index 0000000..36310ca --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/package-info.java @@ -0,0 +1,28 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +/** + * Contains classes to handle sampled sound. + */ +package be.tarsos.dsp; diff --git a/app/src/main/java/be/tarsos/dsp/pitch/AMDF.java b/app/src/main/java/be/tarsos/dsp/pitch/AMDF.java new file mode 100644 index 0000000..5bfeea6 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/pitch/AMDF.java @@ -0,0 +1,172 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +package be.tarsos.dsp.pitch; + +/** + *

+ * A pitch estimator based on the Average Magnitude Difference Function (AMDF)
+ * of an audio buffer. The AMDF is a good indicator of the pitch (f0) of a
+ * signal.
+ *

+ *

+ * The AMDF is calculated by summing the absolute differences between the
+ * waveform and a lagged version of itself.
+ *
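+ * Concretely, for each lag t the detector computes
+ * AMDF(t) = sum over n of |x(n) - x(n + t)|; the lag between the minimum and
+ * maximum period at which the AMDF dips below a sensitivity-based cutoff gives
+ * the period, and the pitch follows as sampleRate / period.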

+ *

+ * The bulk of the code was written by Eder de Souza for the jAudio framework
+ * and adapted for TarsosDSP by Joren Six.
+ *

+ * + * @author Eder Souza (ederwander on github) + * @author Joren Six + */ +public class AMDF implements PitchDetector{ + + + private static final double DEFAULT_MIN_FREQUENCY = 82.0; + private static final double DEFAULT_MAX_FREQUENCY = 1000.0; + private static final double DEFAULT_RATIO = 5.0; + private static final double DEFAULT_SENSITIVITY = 0.1; + + private final float sampleRate; + + private final double[] amd; + private final long maxPeriod; + private final long minPeriod; + private final double ratio; + private final double sensitivity; + + /** + * + * The result of the pitch detection iteration. + */ + private final PitchDetectionResult result; + + /** + * Construct a new Average Magnitude Difference pitch detector. + * + * @param sampleRate The audio sample rate + * @param bufferSize the buffer size of a block of samples + */ + public AMDF(float sampleRate, int bufferSize) { + this(sampleRate,bufferSize,DEFAULT_MIN_FREQUENCY,DEFAULT_MAX_FREQUENCY); + } + + /** + * Construct a new Average Magnitude Difference pitch detector. + * + * @param sampleRate The audio sample rate + * @param bufferSize the buffer size of a block of samples + * @param minFrequency The min frequency to detect in Hz + * @param maxFrequency The max frequency to detect in Hz + */ + public AMDF(float sampleRate, int bufferSize,double minFrequency,double maxFrequency) { + this.sampleRate = sampleRate; + amd = new double[bufferSize]; + this.ratio = DEFAULT_RATIO; + this.sensitivity = DEFAULT_SENSITIVITY; + this.maxPeriod = Math.round(sampleRate / minFrequency + 0.5); + this.minPeriod = Math.round(sampleRate / maxFrequency + 0.5); + result = new PitchDetectionResult(); + } + + @Override + public PitchDetectionResult getPitch(float[] audioBuffer) { + int t = 0; + float f0 = -1; + double minval = Double.POSITIVE_INFINITY; + double maxval = Double.NEGATIVE_INFINITY; + double[] frames1 = new double[0]; + double[] frames2 = new double[0]; + double[] calcSub = new double[0]; + + int maxShift = audioBuffer.length; + + + for (int i = 0; i < maxShift; i++) { + frames1 = new double[maxShift - i + 1]; + frames2 = new double[maxShift - i + 1]; + t = 0; + for (int aux1 = 0; aux1 < maxShift - i; aux1++) { + t = t + 1; + frames1[t] = audioBuffer[aux1]; + + } + t = 0; + for (int aux2 = i; aux2 < maxShift; aux2++) { + t = t + 1; + frames2[t] = audioBuffer[aux2]; + } + + int frameLength = frames1.length; + calcSub = new double[frameLength]; + for (int u = 0; u < frameLength; u++) { + calcSub[u] = frames1[u] - frames2[u]; + } + + double summation = 0; + for (int l = 0; l < frameLength; l++) { + summation += Math.abs(calcSub[l]); + } + amd[i] = summation; + } + + for (int j = (int)minPeriod; j < (int)maxPeriod; j++){ + if(amd[j] < minval){ + minval = amd[j]; + } + if(amd[j] > maxval) { + maxval = amd[j]; + } + } + int cutoff = (int) Math.round((sensitivity * (maxval - minval)) + minval); + int j=(int)minPeriod; + + while(j<=(int)maxPeriod && (amd[j] > cutoff)){ + j=j+1; + } + + double search_length = minPeriod / 2; + minval = amd[j]; + int minpos = j; + int i=j; + while((iWikiPedia article on DTMF. + * + * @author Joren Six + */ +public class DTMF { + + /** + * The list of valid DTMF frequencies. See the WikiPedia article on DTMF. + */ + public static final double[] DTMF_FREQUENCIES = { 697, 770, 852, 941, 1209, + 1336, 1477, 1633 }; + + /** + * The list of valid DTMF characters. See the WikiPedia article on DTMF for the relation between the characters + * and frequencies. 
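+ * For example, the character '5' sits in row 1, column 1 of this table and is
+ * therefore encoded as the sum of a 770 Hz and a 1336 Hz tone.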
+ */ + public static final char[][] DTMF_CHARACTERS = { { '1', '2', '3', 'A' }, + { '4', '5', '6', 'B' }, { '7', '8', '9', 'C' }, + { '*', '0', '#', 'D' } }; + + /** + * Generate a DTMF - tone for a valid DTMF character. + * @param character a valid DTMF character (present in DTMF_CHARACTERS} + * @return a float buffer of predefined length (7168 samples) with the correct DTMF tone representing the character. + */ + public static float[] generateDTMFTone(char character){ + double firstFrequency = -1; + double secondFrequency = -1; + for(int row = 0 ; row < DTMF_CHARACTERS.length ; row++){ + for(int col = 0 ; col < DTMF_CHARACTERS[row].length ; col++){ + if(DTMF_CHARACTERS[row][col] == character){ + firstFrequency = DTMF_FREQUENCIES[row]; + secondFrequency = DTMF_FREQUENCIES[col + 4]; + } + } + } + return DTMF.audioBufferDTMF(firstFrequency,secondFrequency,512*2*10); + } + + /** + * Checks if the given character is present in DTMF_CHARACTERS. + * + * @param character + * the character to check. + * @return True if the given character is present in + * DTMF_CHARACTERS, false otherwise. + */ + public static boolean isDTMFCharacter(char character){ + double firstFrequency = -1; + double secondFrequency = -1; + for(int row = 0 ; row < DTMF_CHARACTERS.length ; row++){ + for(int col = 0 ; col < DTMF_CHARACTERS[row].length ; col++){ + if(DTMF_CHARACTERS[row][col] == character){ + firstFrequency = DTMF_FREQUENCIES[row]; + secondFrequency = DTMF_FREQUENCIES[col + 4]; + } + } + } + return (firstFrequency!=-1 && secondFrequency!=-1); + } + + /** + * Creates an audio buffer in a float array of the defined size. The sample + * rate is 44100Hz by default. It mixes the two given frequencies with an + * amplitude of 0.5. + * + * @param f0 + * The first fundamental frequency. + * @param f1 + * The second fundamental frequency. + * @param size + * The size of the float array (sample rate is 44.1kHz). + * @return An array of the defined size. 
+ */ + public static float[] audioBufferDTMF(final double f0, final double f1, + int size) { + final double sampleRate = 44100.0; + final double amplitudeF0 = 0.4; + final double amplitudeF1 = 0.4; + final double twoPiF0 = 2 * Math.PI * f0; + final double twoPiF1 = 2 * Math.PI * f1; + final float[] buffer = new float[size]; + for (int sample = 0; sample < buffer.length; sample++) { + final double time = sample / sampleRate; + double f0Component = amplitudeF0 * Math.sin(twoPiF0 * time); + double f1Component = amplitudeF1 * Math.sin(twoPiF1 * time); + buffer[sample] = (float) (f0Component + f1Component); + } + return buffer; + } +} diff --git a/app/src/main/java/be/tarsos/dsp/pitch/DynamicWavelet.java b/app/src/main/java/be/tarsos/dsp/pitch/DynamicWavelet.java new file mode 100644 index 0000000..374107b --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/pitch/DynamicWavelet.java @@ -0,0 +1,327 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +package be.tarsos.dsp.pitch; + +import java.util.Arrays; + +/* dywapitchtrack.c + +Dynamic Wavelet Algorithm Pitch Tracking library +Released under the MIT open source licence + +Copyright (c) 2010 Antoine Schmitt + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +*/ + +/** + *

+ * The pitch is the main frequency of the waveform (the 'note' being played or + * sung). It is expressed as a float in Hz. + *

+ *

+ * Unlike the human ear, computers find pitch detection difficult to achieve.
+ * Many algorithms have been designed and tested, but there is no single 'best'
+ * algorithm: they all depend on the context and on the trade-offs that are
+ * acceptable in terms of speed and latency. The context includes the quality
+ * and 'cleanness' of the audio: polyphonic sounds (multiple instruments
+ * playing different notes at the same time) are extremely difficult to track,
+ * percussive or noisy audio has no pitch, most real-life audio has some noisy
+ * moments, and some instruments have a lot of harmonics.
+ *

+ *

+ * The dywapitchtrack is based on a custom-tailored algorithm of very high
+ * quality: it is very accurate (precision < 0.05 semitones), has very low
+ * latency (< 23 ms), and has a very low error rate. It has been thoroughly
+ * tested on the human voice.
+ *

+ *

+ * It can best be described as a dynamic wavelet algorithm (dywa): + *

+ *

+ * The heart of the algorithm is a very powerful wavelet algorithm, described in + * a paper by Eric Larson and Ross Maddox: Real-Time Time-Domain Pitch Tracking Using Wavelets. + *
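+ * A minimal usage sketch, assuming the usual PitchDetectionResult accessors
+ * from the rest of this library; the sample rate and buffer size are
+ * illustrative:
+ * <pre>{@code
+ * PitchDetector detector = new DynamicWavelet(44100f, 1024);
+ * PitchDetectionResult result = detector.getPitch(audioBuffer);
+ * if (result.isPitched()) {
+ *     float hz = result.getPitch();
+ * }
+ * }</pre>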

+ * + * @author Antoine Schmitt + * @author Joren Six + * @see Real-Time Time-Domain Pitch Tracking Using Wavelets + */ +public class DynamicWavelet implements PitchDetector{ + + // algorithm parameters + private final int maxFLWTlevels = 6; + private final double maxF = 3000.; + private final int differenceLevelsN = 3; + private final double maximaThresholdRatio = 0.75; + + /** + * The result of the pitch detection iteration. + */ + private final PitchDetectionResult result; + + private final float sampleRate; + + private int[] distances; + private int[] mins; + private int[] maxs; + + /** + * create a new dynamic wavelet + * @param sampleRate the sample rate in hz + * @param bufferSize the size of the audio blocks + */ + public DynamicWavelet(float sampleRate,int bufferSize){ + this.sampleRate = sampleRate; + + distances = new int[bufferSize]; + mins = new int[bufferSize]; + maxs = new int[bufferSize]; + result = new PitchDetectionResult(); + } + + @Override + public PitchDetectionResult getPitch(float[] audioBuffer) { + float pitchF = -1.0f; + + int curSamNb = audioBuffer.length; + + int nbMins; + int nbMaxs; + + //check if the buffer size changed + if(distances.length == audioBuffer.length){ + //if not fill the arrays with zero + Arrays.fill(distances,0); + Arrays.fill(mins,0); + Arrays.fill(maxs,0); + } else { + //otherwise create new ones + distances = new int[audioBuffer.length]; + mins = new int[audioBuffer.length]; + maxs = new int[audioBuffer.length]; + } + + double ampltitudeThreshold; + double theDC = 0.0; + + + //compute ampltitudeThreshold and theDC + //first compute the DC and maxAMplitude + double maxValue = 0.0; + double minValue = 0.0; + for (int i = 0; i < audioBuffer.length;i++) { + double sample = audioBuffer[i]; + theDC = theDC + sample; + maxValue = Math.max(maxValue, sample); + minValue = Math.min(sample, minValue); + } + theDC = theDC/audioBuffer.length; + maxValue = maxValue - theDC; + minValue = minValue - theDC; + double amplitudeMax = (maxValue > -minValue ? maxValue : -minValue); + + ampltitudeThreshold = amplitudeMax*maximaThresholdRatio; + + // levels, start without downsampling.. + int curLevel = 0; + double curModeDistance = -1.; + int delta; + + //TODO: refactor to make this more java, break it up in methods, remove the wile and branching statements... + + search: + while(true){ + delta = (int) (sampleRate / (Math.pow(2, curLevel)*maxF)); + if (curSamNb < 2) + break; + + // compute the first maximums and minumums after zero-crossing + // store if greater than the min threshold + // and if at a greater distance than delta + double dv, previousDV = -1000; + + nbMins = nbMaxs = 0; + int lastMinIndex = -1000000; + int lastmaxIndex = -1000000; + boolean findMax = false; + boolean findMin = false; + for (int i = 2; i < curSamNb; i++) { + double si = audioBuffer[i] - theDC; + double si1 = audioBuffer[i-1] - theDC; + + if(si1 <= 0 && si > 0) findMax = true; + if(si1 >= 0 && si < 0) findMin = true; + + // min or max ? 
+ dv = si - si1; + + if (previousDV > -1000) { + if (findMin && previousDV < 0 && dv >= 0) { + + // minimum + if (Math.abs(si) >= ampltitudeThreshold) { + if (i > lastMinIndex + delta) { + mins[nbMins++] = i; + lastMinIndex = i; + findMin = false; + } + } + } + + if (findMax && previousDV > 0 && dv <= 0) { + // maximum + if (Math.abs(si) >= ampltitudeThreshold) { + if (i > lastmaxIndex + delta) { + maxs[nbMaxs++] = i; + lastmaxIndex = i; + findMax = false; + } + } + } + } + previousDV = dv; + } + + if (nbMins == 0 && nbMaxs == 0) { + // no best distance ! + //asLog("dywapitch no mins nor maxs, exiting\n"); + + // if DEBUGG then put "no mins nor maxs, exiting" + break; + } + + int d; + Arrays.fill(distances, 0); + for (int i = 0 ; i < nbMins ; i++) { + for (int j = 1; j < differenceLevelsN; j++) { + if (i+j < nbMins) { + d = Math.abs(mins[i] - mins[i+j]); + //asLog("dywapitch i=%ld j=%ld d=%ld\n", i, j, d); + distances[d] = distances[d] + 1; + } + } + } + + int bestDistance = -1; + int bestValue = -1; + for (int i = 0; i< curSamNb; i++) { + int summed = 0; + for (int j = -delta ; j <= delta ; j++) { + if (i+j >=0 && i+j < curSamNb) + summed += distances[i+j]; + } + //asLog("dywapitch i=%ld summed=%ld bestDistance=%ld\n", i, summed, bestDistance); + if (summed == bestValue) { + if (i == 2*bestDistance) + bestDistance = i; + + } else if (summed > bestValue) { + bestValue = summed; + bestDistance = i; + } + } + + // averaging + double distAvg = 0.0; + double nbDists = 0; + for (int j = -delta ; j <= delta ; j++) { + if (bestDistance+j >=0 && bestDistance+j < audioBuffer.length) { + int nbDist = distances[bestDistance+j]; + if (nbDist > 0) { + nbDists += nbDist; + distAvg += (bestDistance+j)*nbDist; + } + } + } + + // this is our mode distance ! + distAvg /= nbDists; + //asLog("dywapitch distAvg=%f\n", distAvg); + + // continue the levels ? + if (curModeDistance > -1.) { + double similarity = Math.abs(distAvg*2 - curModeDistance); + if (similarity <= 2*delta) { + //if DEBUGG then put "similarity="&similarity&&"delta="&delta&&"ok" + //asLog("dywapitch similarity=%f OK !\n", similarity); + // two consecutive similar mode distances : ok ! + pitchF = (float) (sampleRate/(Math.pow(2,curLevel-1)*curModeDistance)); + break; + } + //if DEBUGG then put "similarity="&similarity&&"delta="&delta&&"not" + } + + // not similar, continue next level + curModeDistance = distAvg; + + + curLevel = curLevel + 1; + if (curLevel >= maxFLWTlevels) { + // put "max levels reached, exiting" + //asLog("dywapitch max levels reached, exiting\n"); + break; + } + + // downsample + if (curSamNb < 2) { + //asLog("dywapitch not enough samples, exiting\n"); + break; + } + //do not modify original audio buffer, make a copy buffer, if + //downsampling is needed (only once). 
+ float[] newAudioBuffer = audioBuffer; + if(curSamNb == distances.length){ + newAudioBuffer = new float[curSamNb/2]; + } + for (int i = 0; i < curSamNb/2; i++) { + newAudioBuffer[i] = (audioBuffer[2*i] + audioBuffer[2*i + 1])/2.0f; + } + audioBuffer = newAudioBuffer; + curSamNb /= 2; + } + + result.setPitch(pitchF); + result.setPitched(-1!=pitchF); + result.setProbability(-1); + + return result; + } +} diff --git a/app/src/main/java/be/tarsos/dsp/pitch/FastYin.java b/app/src/main/java/be/tarsos/dsp/pitch/FastYin.java new file mode 100644 index 0000000..f51b476 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/pitch/FastYin.java @@ -0,0 +1,348 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +/* +* +* I took Joren's Code and changed it so that +* it uses the FFT to calculate the difference function. +* TarsosDSP is developed by Joren Six at +* The Royal Academy of Fine Arts & Royal Conservatory, +* University College Ghent, +* Hoogpoort 64, 9000 Ghent - Belgium +* +* http://tarsos.0110.be/tag/TarsosDSP +* +*/ + +package be.tarsos.dsp.pitch; + +import be.tarsos.dsp.util.fft.FloatFFT; + +/** + * An implementation of the YIN pitch tracking algorithm which uses an FFT to + * calculate the difference function. This makes calculating the difference + * function more performant. See the YIN paper. This implementation is done by Matthias Mauch and is + * based on {@link Yin} which is based on the implementation found in aubio by Paul Brossier. + * + * @author Matthias Mauch + * @author Joren Six + * @author Paul Brossier + */ +public final class FastYin implements PitchDetector { + /** + * The default YIN threshold value. Should be around 0.10~0.15. See YIN + * paper for more information. + */ + private static final double DEFAULT_THRESHOLD = 0.20; + + /** + * The default size of an audio buffer (in samples). + */ + public static final int DEFAULT_BUFFER_SIZE = 2048; + + /** + * The default overlap of two consecutive audio buffers (in samples). + */ + public static final int DEFAULT_OVERLAP = 1536; + + /** + * The actual YIN threshold. + */ + private final double threshold; + + /** + * The audio sample rate. Most audio has a sample rate of 44.1kHz. + */ + private final float sampleRate; + + /** + * The buffer that stores the calculated values. It is exactly half the size + * of the input buffer. + */ + private final float[] yinBuffer; + + /** + * The result of the pitch detection iteration. + */ + private final PitchDetectionResult result; + + //------------------------ FFT instance members + + /** + * Holds the FFT data, twice the length of the audio buffer. + */ + private final float[] audioBufferFFT; + + /** + * Half of the data, disguised as a convolution kernel. + */ + private final float[] kernel; + + /** + * Buffer to allow convolution via complex multiplication. It calculates the auto correlation function (ACF). 
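+ * The difference function of the YIN paper (eq. 7) is then recovered as
+ * d(tau) = r(0) + r_tau(0) - 2 * ACF(tau): the two power terms minus twice
+ * this FFT-based autocorrelation.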
+ */ + private final float[] yinStyleACF; + + /** + * An FFT object to quickly calculate the difference function. + */ + private final FloatFFT fft; + + /** + * Create a new pitch detector for a stream with the defined sample rate. + * Processes the audio in blocks of the defined size. + * + * @param audioSampleRate + * The sample rate of the audio stream. E.g. 44.1 kHz. + * @param bufferSize + * The size of a buffer. E.g. 1024. + */ + public FastYin(final float audioSampleRate, final int bufferSize) { + this(audioSampleRate, bufferSize, DEFAULT_THRESHOLD); + } + + /** + * Create a new pitch detector for a stream with the defined sample rate. + * Processes the audio in blocks of the defined size. + * + * @param audioSampleRate + * The sample rate of the audio stream. E.g. 44.1 kHz. + * @param bufferSize + * The size of a buffer. E.g. 1024. + * @param yinThreshold + * The parameter that defines which peaks are kept as possible + * pitch candidates. See the YIN paper for more details. + */ + public FastYin(final float audioSampleRate, final int bufferSize, final double yinThreshold) { + this.sampleRate = audioSampleRate; + this.threshold = yinThreshold; + yinBuffer = new float[bufferSize / 2]; + //Initializations for FFT difference step + audioBufferFFT = new float[2*bufferSize]; + kernel = new float[2*bufferSize]; + yinStyleACF = new float[2*bufferSize]; + fft = new FloatFFT(bufferSize); + result = new PitchDetectionResult(); + } + + /** + * The main flow of the YIN algorithm. Returns a pitch value in Hz or -1 if + * no pitch is detected. + * + * @return a pitch value in Hz or -1 if no pitch is detected. + */ + public PitchDetectionResult getPitch(final float[] audioBuffer) { + + final int tauEstimate; + final float pitchInHertz; + + // step 2 + difference(audioBuffer); + + // step 3 + cumulativeMeanNormalizedDifference(); + + // step 4 + tauEstimate = absoluteThreshold(); + + // step 5 + if (tauEstimate != -1) { + final float betterTau = parabolicInterpolation(tauEstimate); + + // step 6 + // TODO Implement optimization for the AUBIO_YIN algorithm. + // 0.77% => 0.5% error rate, + // using the data of the YIN paper + // bestLocalEstimate() + + // conversion to Hz + pitchInHertz = sampleRate / betterTau; + } else{ + // no pitch found + pitchInHertz = -1; + } + + result.setPitch(pitchInHertz); + + return result; + } + + /** + * Implements the difference function as described in step 2 of the YIN + * paper with an FFT to reduce the number of operations. + */ + private void difference(final float[] audioBuffer) { + // POWER TERM CALCULATION + // ... for the power terms in equation (7) in the Yin paper + float[] powerTerms = new float[yinBuffer.length]; + for (int j = 0; j < yinBuffer.length; ++j) { + powerTerms[0] += audioBuffer[j] * audioBuffer[j]; + } + // now iteratively calculate all others (saves a few multiplications) + for (int tau = 1; tau < yinBuffer.length; ++tau) { + powerTerms[tau] = powerTerms[tau-1] - audioBuffer[tau-1] * audioBuffer[tau-1] + audioBuffer[tau+yinBuffer.length] * audioBuffer[tau+yinBuffer.length]; + } + + // YIN-STYLE AUTOCORRELATION via FFT + // 1. data + for (int j = 0; j < audioBuffer.length; ++j) { + audioBufferFFT[2*j] = audioBuffer[j]; + audioBufferFFT[2*j+1] = 0; + } + fft.complexForward(audioBufferFFT); + + // 2. 
half of the data, disguised as a convolution kernel + for (int j = 0; j < yinBuffer.length; ++j) { + kernel[2*j] = audioBuffer[(yinBuffer.length-1)-j]; + kernel[2*j+1] = 0; + kernel[2*j+audioBuffer.length] = 0; + kernel[2*j+audioBuffer.length+1] = 0; + } + fft.complexForward(kernel); + + // 3. convolution via complex multiplication + for (int j = 0; j < audioBuffer.length; ++j) { + yinStyleACF[2*j] = audioBufferFFT[2*j]*kernel[2*j] - audioBufferFFT[2*j+1]*kernel[2*j+1]; // real + yinStyleACF[2*j+1] = audioBufferFFT[2*j+1]*kernel[2*j] + audioBufferFFT[2*j]*kernel[2*j+1]; // imaginary + } + fft.complexInverse(yinStyleACF, true); + + // CALCULATION OF difference function + // ... according to (7) in the Yin paper. + for (int j = 0; j < yinBuffer.length; ++j) { + // taking only the real part + yinBuffer[j] = powerTerms[0] + powerTerms[j] - 2 * yinStyleACF[2 * (yinBuffer.length - 1 + j)]; + } + } + + /** + * The cumulative mean normalized difference function as described in step 3 + * of the YIN paper.
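+ * Concretely, d'(0) = 1 and d'(tau) = d(tau) * tau / (d(1) + ... + d(tau)) for
+ * tau > 0, which is what the running sum in the method below computes.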
+ * + * yinBuffer[0] == yinBuffer[1] = 1 + * + */ + private void cumulativeMeanNormalizedDifference() { + int tau; + yinBuffer[0] = 1; + float runningSum = 0; + for (tau = 1; tau < yinBuffer.length; tau++) { + runningSum += yinBuffer[tau]; + yinBuffer[tau] *= tau / runningSum; + } + } + + /** + * Implements step 4 of the AUBIO_YIN paper. + */ + private int absoluteThreshold() { + // Uses another loop construct + // than the AUBIO implementation + int tau; + // first two positions in yinBuffer are always 1 + // So start at the third (index 2) + for (tau = 2; tau < yinBuffer.length; tau++) { + if (yinBuffer[tau] < threshold) { + while (tau + 1 < yinBuffer.length && yinBuffer[tau + 1] < yinBuffer[tau]) { + tau++; + } + // found tau, exit loop and return + // store the probability + // From the YIN paper: The threshold determines the list of + // candidates admitted to the set, and can be interpreted as the + // proportion of aperiodic power tolerated + // within a periodic signal. + // + // Since we want the periodicity and and not aperiodicity: + // periodicity = 1 - aperiodicity + result.setProbability(1 - yinBuffer[tau]); + break; + } + } + + + // if no pitch found, tau => -1 + if (tau == yinBuffer.length || yinBuffer[tau] >= threshold || result.getProbability() > 1.0) { + tau = -1; + result.setProbability(0); + result.setPitched(false); + } else { + result.setPitched(true); + } + + return tau; + } + + /** + * Implements step 5 of the AUBIO_YIN paper. It refines the estimated tau + * value using parabolic interpolation. This is needed to detect higher + * frequencies more precisely. See http://fizyka.umk.pl/nrbook/c10-2.pdf and + * for more background + * http://fedc.wiwi.hu-berlin.de/xplore/tutorials/xegbohtmlnode62.html + * + * @param tauEstimate + * The estimated tau value. + * @return A better, more precise tau value. 
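+ * With s0, s1 and s2 the difference values at tau - 1, tau and tau + 1, the
+ * refined estimate is tau + (s2 - s0) / (2 * (2 * s1 - s2 - s0)); at the array
+ * edges the neighbour with the smaller difference value is used instead.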
+ */ + private float parabolicInterpolation(final int tauEstimate) { + final float betterTau; + final int x0; + final int x2; + + if (tauEstimate < 1) { + x0 = tauEstimate; + } else { + x0 = tauEstimate - 1; + } + if (tauEstimate + 1 < yinBuffer.length) { + x2 = tauEstimate + 1; + } else { + x2 = tauEstimate; + } + if (x0 == tauEstimate) { + if (yinBuffer[tauEstimate] <= yinBuffer[x2]) { + betterTau = tauEstimate; + } else { + betterTau = x2; + } + } else if (x2 == tauEstimate) { + if (yinBuffer[tauEstimate] <= yinBuffer[x0]) { + betterTau = tauEstimate; + } else { + betterTau = x0; + } + } else { + float s0, s1, s2; + s0 = yinBuffer[x0]; + s1 = yinBuffer[tauEstimate]; + s2 = yinBuffer[x2]; + // fixed AUBIO implementation, thanks to Karl Helgason: + // (2.0f * s1 - s2 - s0) was incorrectly multiplied with -1 + betterTau = tauEstimate + (s2 - s0) / (2 * (2 * s1 - s2 - s0)); + } + return betterTau; + } +} diff --git a/app/src/main/java/be/tarsos/dsp/pitch/GeneralizedGoertzel.java b/app/src/main/java/be/tarsos/dsp/pitch/GeneralizedGoertzel.java new file mode 100644 index 0000000..166da70 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/pitch/GeneralizedGoertzel.java @@ -0,0 +1,113 @@ +package be.tarsos.dsp.pitch; + +import be.tarsos.dsp.AudioEvent; +import be.tarsos.dsp.AudioProcessor; +import be.tarsos.dsp.pitch.Goertzel.FrequenciesDetectedHandler; +import be.tarsos.dsp.util.Complex; +import be.tarsos.dsp.util.fft.HammingWindow; +import be.tarsos.dsp.util.fft.WindowFunction; + +/** + * See "Goertzel algorithm generalized to non-integer multiples of fundamental frequency" by Petr Sysel and Pavel Rajmic + * + * + * @author Joren Six + * + */ +public class GeneralizedGoertzel implements AudioProcessor{ + + /** + * A list of frequencies to detect. + */ + private final double[] frequenciesToDetect; + + private final double[] indvec; + + /** + * Cached cosine calculations for each frequency to detect. + */ + private final double[] precalculatedCosines; + /** + * Cached wnk calculations for each frequency to detect. + */ + private final double[] precalculatedWnk; + /** + * A calculated power for each frequency to detect. This array is reused for + * performance reasons. + */ + private final double[] calculatedPowers; + private final Complex[] calculatedComplex; + + private final FrequenciesDetectedHandler handler; + + + /** + * Create a new Generalized Goertzel processor. + * @param audioSampleRate The sample rate of the audio in Hz. + * @param bufferSize the size of the buffer. + * @param frequencies The list of frequencies to detect (in Hz). + * @param handler The handler used to handle the detected frequencies. 
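+ *
+ * <p>An illustrative construction (added note, not part of the original
+ * Javadoc; the DTMF row frequencies and the buffer size are example values,
+ * and the lambda assumes a Java 8+ language level):</p>
+ * <pre>{@code
+ * double[] dtmfRows = {697, 770, 852, 941};
+ * AudioProcessor goertzel = new GeneralizedGoertzel(44100f, 1024, dtmfRows,
+ *         (time, freqs, powers, allFreqs, allPowers) -> {
+ *             // this processor reports every requested frequency with its
+ *             // estimated power, so freqs/powers mirror allFreqs/allPowers
+ *         });
+ * }</pre>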
+ */ + public GeneralizedGoertzel(final float audioSampleRate, final int bufferSize, + double[] frequencies, FrequenciesDetectedHandler handler){ + frequenciesToDetect = frequencies; + + indvec = new double[frequenciesToDetect.length]; + for (int j = 0; j < frequenciesToDetect.length; j++) { + indvec[j] = frequenciesToDetect[j]/(audioSampleRate/(float)bufferSize); + } + + + precalculatedCosines = new double[frequencies.length]; + precalculatedWnk = new double[frequencies.length]; + this.handler = handler; + + calculatedPowers = new double[frequencies.length]; + calculatedComplex = new Complex[frequencies.length]; + + for (int i = 0; i < frequenciesToDetect.length; i++) { + precalculatedCosines[i] = 2 * Math.cos(2 * Math.PI + * frequenciesToDetect[i] / audioSampleRate); + precalculatedWnk[i] = Math.exp(-2 * Math.PI + * frequenciesToDetect[i] / audioSampleRate); + } + + } + + @Override + public boolean process(AudioEvent audioEvent) { + + float[] x = audioEvent.getFloatBuffer(); + WindowFunction f = new HammingWindow(); + f.apply(x); + for (int j = 0; j < frequenciesToDetect.length; j++) { + double pik_term = 2 * Math.PI * indvec[j]/(float) audioEvent.getBufferSize(); + double cos_pik_term2 = Math.cos(pik_term) * 2; + Complex cc = new Complex(0,-1*pik_term).exp(); + double s0=0; + double s1=0; + double s2=0; + + for(int i = 0 ; i < audioEvent.getBufferSize() ; i++ ){ + s0 = x[i]+cos_pik_term2*s1-s2; + s2=s1; + s1=s0; + } + s0 = cos_pik_term2 * s1 - s2; + calculatedComplex[j] = cc.times(new Complex(-s1,0)).plus(new Complex(s0,0)); + calculatedPowers[j] = calculatedComplex[j].mod(); + } + + handler.handleDetectedFrequencies(audioEvent.getTimeStamp(),frequenciesToDetect.clone(), calculatedPowers.clone(), + frequenciesToDetect.clone(), calculatedPowers.clone()); + + return true; + } + + + @Override + public void processingFinished() { + + } + +} diff --git a/app/src/main/java/be/tarsos/dsp/pitch/Goertzel.java b/app/src/main/java/be/tarsos/dsp/pitch/Goertzel.java new file mode 100644 index 0000000..cdcc49b --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/pitch/Goertzel.java @@ -0,0 +1,159 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +package be.tarsos.dsp.pitch; + +import be.tarsos.dsp.AudioEvent; +import be.tarsos.dsp.AudioProcessor; + +/** + * Contains an implementation of the Goertzel algorithm. It can be used to + * detect if one or more predefined frequencies are present in a signal. E.g. to + * do DTMF decoding. + * + * @author Joren Six + */ +public class Goertzel implements AudioProcessor { + + /** + * If the power in dB is higher than this threshold, the frequency is + * present in the signal. + */ + private static final double POWER_THRESHOLD = 35;// in dB + + /** + * A list of frequencies to detect. + */ + private final double[] frequenciesToDetect; + /** + * Cached cosine calculations for each frequency to detect. 
+ */ + private final double[] precalculatedCosines; + /** + * Cached wnk calculations for each frequency to detect. + */ + private final double[] precalculatedWnk; + /** + * A calculated power for each frequency to detect. This array is reused for + * performance reasons. + */ + private final double[] calculatedPowers; + + private final FrequenciesDetectedHandler handler; + + /** + * Create a new Generalized Goertzel processor. + * @param audioSampleRate The sample rate of the audio in Hz. + * @param bufferSize the size of the buffer. + * @param frequencies The list of frequencies to detect (in Hz). + * @param handler The handler used to handle the detected frequencies. + */ + public Goertzel(final float audioSampleRate, final int bufferSize, + double[] frequencies, FrequenciesDetectedHandler handler) { + + frequenciesToDetect = frequencies; + precalculatedCosines = new double[frequencies.length]; + precalculatedWnk = new double[frequencies.length]; + this.handler = handler; + + calculatedPowers = new double[frequencies.length]; + + for (int i = 0; i < frequenciesToDetect.length; i++) { + precalculatedCosines[i] = 2 * Math.cos(2 * Math.PI + * frequenciesToDetect[i] / audioSampleRate); + precalculatedWnk[i] = Math.exp(-2 * Math.PI + * frequenciesToDetect[i] / audioSampleRate); + } + } + + /** + * An interface used to react on detected frequencies. + * + * @author Joren Six + */ + public interface FrequenciesDetectedHandler { + /** + * React on detected frequencies. + * + * @param timestamp + * A timestamp in seconds + * @param frequencies + * A list of detected frequencies. + * @param powers + * A list of powers of the detected frequencies. + * @param allFrequencies + * A list of all frequencies that were checked. + * @param allPowers + * A list of powers of all frequencies that were checked. 
+ */ + void handleDetectedFrequencies(final double timestamp,final double[] frequencies, + final double[] powers, final double[] allFrequencies, + final double[] allPowers); + } + + @Override + public boolean process(AudioEvent audioEvent) { + float[] audioFloatBuffer = audioEvent.getFloatBuffer(); + double skn0, skn1, skn2; + int numberOfDetectedFrequencies = 0; + for (int j = 0; j < frequenciesToDetect.length; j++) { + skn0 = skn1 = skn2 = 0; + for (int i = 0; i < audioFloatBuffer.length; i++) { + skn2 = skn1; + skn1 = skn0; + skn0 = precalculatedCosines[j] * skn1 - skn2 + + audioFloatBuffer[i]; + } + double wnk = precalculatedWnk[j]; + calculatedPowers[j] = 20 * Math.log10(Math.abs(skn0 - wnk * skn1)); + if (calculatedPowers[j] > POWER_THRESHOLD) { + numberOfDetectedFrequencies++; + } + } + + if (numberOfDetectedFrequencies > 0) { + double[] frequencies = new double[numberOfDetectedFrequencies]; + double[] powers = new double[numberOfDetectedFrequencies]; + int index = 0; + for (int j = 0; j < frequenciesToDetect.length; j++) { + if (calculatedPowers[j] > POWER_THRESHOLD) { + frequencies[index] = frequenciesToDetect[j]; + powers[index] = calculatedPowers[j]; + index++; + } + } + handler.handleDetectedFrequencies(audioEvent.getTimeStamp(),frequencies, powers, + frequenciesToDetect.clone(), calculatedPowers.clone()); + } + + return true; + } + + + + @Override + public void processingFinished() { + } + +} diff --git a/app/src/main/java/be/tarsos/dsp/pitch/McLeodPitchMethod.java b/app/src/main/java/be/tarsos/dsp/pitch/McLeodPitchMethod.java new file mode 100644 index 0000000..ddc6205 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/pitch/McLeodPitchMethod.java @@ -0,0 +1,397 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +/** + */ +package be.tarsos.dsp.pitch; + +import java.util.ArrayList; +import java.util.List; + +/** + *

+ * <p>
+ * Implementation of The McLeod Pitch Method (MPM). It is described in the
+ * article <em>A Smarter Way to Find Pitch</em>. According to the article:
+ * </p>
+ * <blockquote>
+ * <p>
+ * A fast, accurate and robust method for finding the continuous pitch in
+ * monophonic musical sounds. [It uses] a special normalized version of the
+ * Squared Difference Function (SDF) coupled with a peak picking algorithm.
+ * </p>
+ * <p>
+ * MPM runs in real time with a standard 44.1 kHz sampling rate. It operates
+ * without using low-pass filtering so it can work on sound with high harmonic
+ * frequencies such as a violin and it can display pitch changes of one cent
+ * reliably. MPM works well without any post processing to correct the pitch.
+ * </p>
+ * </blockquote>
+ * <p>
+ * For the moment this implementation uses the inefficient way of calculating
+ * the pitch. It uses O(Ww) with W the window size in samples and w
+ * the desired number of ACF coefficients. The implementation can be optimized
+ * to O((W+w)log(W+w)) by using a Fast Fourier Transform to calculate the
+ * Auto-Correlation Function. But I am still afraid of the dark magic of the
+ * FFT and clinging to the familiar, friendly, laggard time domain.
+ * </p>
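+ * <p>
+ * A minimal usage sketch (added example, not part of the original TarsosDSP
+ * Javadoc; the 44.1 kHz sample rate and the 1024-sample buffer are
+ * illustrative values only):
+ * </p>
+ * <pre>{@code
+ * PitchDetector mpm = new McLeodPitchMethod(44100f, 1024);
+ * float[] audioBuffer = new float[1024]; // filled with samples in [-1, 1]
+ * PitchDetectionResult r = mpm.getPitch(audioBuffer);
+ * if (r.isPitched()) {
+ *     float hz = r.getPitch(); // estimated pitch in Hz
+ * }
+ * }</pre>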

+ * + * @author Phillip McLeod + * @author Joren Six + */ +public final class McLeodPitchMethod implements PitchDetector { + + /** + * The expected size of an audio buffer (in samples). + */ + public static final int DEFAULT_BUFFER_SIZE = 1024; + + /** + * Overlap defines how much two audio buffers following each other should + * overlap (in samples). 75% overlap is advised in the MPM article. + */ + public static final int DEFAULT_OVERLAP = 768; + + /** + * Defines the relative size the chosen peak (pitch) has. 0.93 means: choose + * the first peak that is higher than 93% of the highest peak detected. 93% + * is the default value used in the Tartini user interface. + */ + private static final double DEFAULT_CUTOFF = 0.97; + /** + * For performance reasons, peaks below this cutoff are not even considered. + */ + private static final double SMALL_CUTOFF = 0.5; + + /** + * Pitch annotations below this threshold are considered invalid, they are + * ignored. + */ + private static final double LOWER_PITCH_CUTOFF = 80.0; // Hz + + /** + * Defines the relative size the chosen peak (pitch) has. + */ + private final double cutoff; + + /** + * The audio sample rate. Most audio has a sample rate of 44.1kHz. + */ + private final float sampleRate; + + /** + * Contains a normalized square difference function value for each delay + * (tau). + */ + private final float[] nsdf; + + /** + * The x and y coordinate of the top of the curve (nsdf). + */ + private float turningPointX, turningPointY; + + /** + * A list with minimum and maximum values of the nsdf curve. + */ + private final List maxPositions = new ArrayList(); + + /** + * A list of estimates of the period of the signal (in samples). + */ + private final List periodEstimates = new ArrayList(); + + /** + * A list of estimates of the amplitudes corresponding with the period + * estimates. + */ + private final List ampEstimates = new ArrayList(); + + /** + * The result of the pitch detection iteration. + */ + private final PitchDetectionResult result; + + /** + * Initializes the normalized square difference value array and stores the + * sample rate. + * + * @param audioSampleRate + * The sample rate of the audio to check. + */ + public McLeodPitchMethod(final float audioSampleRate) { + this(audioSampleRate, DEFAULT_BUFFER_SIZE, DEFAULT_CUTOFF); + } + + /** + * Create a new pitch detector. + * + * @param audioSampleRate + * The sample rate of the audio. + * @param audioBufferSize + * The size of one audio buffer 1024 samples is common. + */ + public McLeodPitchMethod(final float audioSampleRate, final int audioBufferSize) { + this(audioSampleRate, audioBufferSize, DEFAULT_CUTOFF); + } + + /** + * Create a new pitch detector. + * + * @param audioSampleRate + * The sample rate of the audio. + * @param audioBufferSize + * The size of one audio buffer 1024 samples is common. + * @param cutoffMPM + * The cutoff (similar to the YIN threshold). In the Tartini + * paper 0.93 is used. + */ + public McLeodPitchMethod(final float audioSampleRate, final int audioBufferSize, final double cutoffMPM) { + this.sampleRate = audioSampleRate; + nsdf = new float[audioBufferSize]; + this.cutoff = cutoffMPM; + result = new PitchDetectionResult(); + } + + /** + * Implements the normalized square difference function. See section 4 (and + * the explanation before) in the MPM article. This calculation can be + * optimized by using an FFT. The results should remain the same. + * + * @param audioBuffer + * The buffer with audio information. 
+ */ + private void normalizedSquareDifference(final float[] audioBuffer) { + for (int tau = 0; tau < audioBuffer.length; tau++) { + float acf = 0; + float divisorM = 0; + for (int i = 0; i < audioBuffer.length - tau; i++) { + acf += audioBuffer[i] * audioBuffer[i + tau]; + divisorM += audioBuffer[i] * audioBuffer[i] + audioBuffer[i + tau] * audioBuffer[i + tau]; + } + nsdf[tau] = 2 * acf / divisorM; + } + } + + /* + * (non-Javadoc) + * + * @see be.tarsos.pitch.pure.PurePitchDetector#getPitch(float[]) + */ + public PitchDetectionResult getPitch(final float[] audioBuffer) { + final float pitch; + + // 0. Clear previous results (Is this faster than initializing a list + // again and again?) + maxPositions.clear(); + periodEstimates.clear(); + ampEstimates.clear(); + + // 1. Calculate the normalized square difference for each Tau value. + normalizedSquareDifference(audioBuffer); + // 2. Peak picking time: time to pick some peaks. + peakPicking(); + + double highestAmplitude = Double.NEGATIVE_INFINITY; + + for (final Integer tau : maxPositions) { + // make sure every annotation has a probability attached + highestAmplitude = Math.max(highestAmplitude, nsdf[tau]); + + if (nsdf[tau] > SMALL_CUTOFF) { + // calculates turningPointX and Y + parabolicInterpolation(tau); + // store the turning points + ampEstimates.add(turningPointY); + periodEstimates.add(turningPointX); + // remember the highest amplitude + highestAmplitude = Math.max(highestAmplitude, turningPointY); + } + } + + if (periodEstimates.isEmpty()) { + pitch = -1; + } else { + // use the overall maximum to calculate a cutoff. + // The cutoff value is based on the highest value and a relative + // threshold. + final double actualCutoff = cutoff * highestAmplitude; + + // find first period above or equal to cutoff + int periodIndex = 0; + for (int i = 0; i < ampEstimates.size(); i++) { + if (ampEstimates.get(i) >= actualCutoff) { + periodIndex = i; + break; + } + } + + final double period = periodEstimates.get(periodIndex); + final float pitchEstimate = (float) (sampleRate / period); + if (pitchEstimate > LOWER_PITCH_CUTOFF) { + pitch = pitchEstimate; + } else { + pitch = -1; + } + + } + result.setProbability((float) highestAmplitude); + result.setPitch(pitch); + result.setPitched(pitch != -1); + + return result; + } + + /** + *

+ * <p>
+ * Finds the x value corresponding with the peak of a parabola.
+ * </p>
+ * <p>
+ * a,b,c are three samples that follow each other. E.g. a is at 511, b at
+ * 512 and c at 513; f(a), f(b) and f(c) are the normalized square
+ * difference values for those samples; x is the peak of the parabola and is
+ * what we are looking for. Because the samples follow each other
+ * <code>b - a = 1</code> the formula for parabolic interpolation
+ * can be simplified a lot.
+ * </p>
+ * <p>
+ * The following ASCII art shows it a bit more clear, imagine this to be a
+ * bit more curvaceous.
+ * </p>
+	 * <pre>
+	 *     nsdf(x)
+	 *       ^
+	 *       |
+	 * f(x)  |------ ^
+	 * f(b)  |     / |\
+	 * f(a)  |    /  | \
+	 *       |   /   |  \
+	 *       |  /    |   \
+	 * f(c)  | /     |    \
+	 *       |_____________________> x
+	 *            a  x b  c
+	 * </pre>
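+ * <p>
+ * In this notation the turning point computed by the implementation below is
+ * (added clarification, not part of the original Javadoc; it is the standard
+ * three-point parabola vertex with b - a = 1):
+ * </p>
+ * <pre>
+ *     x = b + (f(a) - f(c)) / (2 * (f(a) + f(c) - 2 * f(b)))
+ * </pre>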
+ * + * @param tau + * The delay tau, b value in the drawing is the tau value. + */ + private void parabolicInterpolation(final int tau) { + final float nsdfa = nsdf[tau - 1]; + final float nsdfb = nsdf[tau]; + final float nsdfc = nsdf[tau + 1]; + final float bValue = tau; + final float bottom = nsdfc + nsdfa - 2 * nsdfb; + if (bottom == 0.0) { + turningPointX = bValue; + turningPointY = nsdfb; + } else { + final float delta = nsdfa - nsdfc; + turningPointX = bValue + delta / (2 * bottom); + turningPointY = nsdfb - delta * delta / (8 * bottom); + } + } + + /** + *

+ * <p>
+ * Implementation based on the GPL'ed code of Tartini. This code can be found
+ * in the file general/mytransforms.cpp.
+ * </p>
+ * <p>
+ * Finds the highest value between each pair of positive zero crossings.
+ * Including the highest value between the last positive zero crossing and
+ * the end (if any). Ignoring the first maximum (which is at zero). In this
+ * diagram the desired values are marked with a +
+ * </p>
+	 * <pre>
+	 *  f(x)
+	 *   ^
+	 *   |
+	 *  1|               +
+	 *   | \      +     /\      +     /\
+	 *  0| _\____/\____/__\/\__/\____/_______> x
+	 *   |   \  /  \  /      \/  \  /
+	 * -1|    \/    \/            \/
+	 *   |
+	 * </pre>
+ * + * @param nsdf + * The array to look for maximum values in. It should contain + * values between -1 and 1 + * @author Phillip McLeod + */ + private void peakPicking() { + + int pos = 0; + int curMaxPos = 0; + + // find the first negative zero crossing + while (pos < (nsdf.length - 1) / 3 && nsdf[pos] > 0) { + pos++; + } + + // loop over all the values below zero + while (pos < nsdf.length - 1 && nsdf[pos] <= 0.0) { + pos++; + } + + // can happen if output[0] is NAN + if (pos == 0) { + pos = 1; + } + + while (pos < nsdf.length - 1) { + assert nsdf[pos] >= 0; + if (nsdf[pos] > nsdf[pos - 1] && nsdf[pos] >= nsdf[pos + 1]) { + if (curMaxPos == 0) { + // the first max (between zero crossings) + curMaxPos = pos; + } else if (nsdf[pos] > nsdf[curMaxPos]) { + // a higher max (between the zero crossings) + curMaxPos = pos; + } + } + pos++; + // a negative zero crossing + if (pos < nsdf.length - 1 && nsdf[pos] <= 0) { + // if there was a maximum add it to the list of maxima + if (curMaxPos > 0) { + maxPositions.add(curMaxPos); + curMaxPos = 0; // clear the maximum position, so we start + // looking for a new ones + } + while (pos < nsdf.length - 1 && nsdf[pos] <= 0.0f) { + pos++; // loop over all the values below zero + } + } + } + if (curMaxPos > 0) { // if there was a maximum in the last part + maxPositions.add(curMaxPos); // add it to the vector of maxima + } + } +} diff --git a/app/src/main/java/be/tarsos/dsp/pitch/PitchDetectionHandler.java b/app/src/main/java/be/tarsos/dsp/pitch/PitchDetectionHandler.java new file mode 100644 index 0000000..87f0826 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/pitch/PitchDetectionHandler.java @@ -0,0 +1,41 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +package be.tarsos.dsp.pitch; + +import be.tarsos.dsp.AudioEvent; + +/** + * An interface to handle detected pitch. + * + * @author Joren Six + */ +public interface PitchDetectionHandler { + /** + * Handle a detected pitch. 
+ * @param pitchDetectionResult + * @param audioEvent + * + */ + void handlePitch(PitchDetectionResult pitchDetectionResult,AudioEvent audioEvent); +} diff --git a/app/src/main/java/be/tarsos/dsp/pitch/PitchDetectionResult.java b/app/src/main/java/be/tarsos/dsp/pitch/PitchDetectionResult.java new file mode 100644 index 0000000..6978407 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/pitch/PitchDetectionResult.java @@ -0,0 +1,127 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +package be.tarsos.dsp.pitch; + + +/** + * A class with information about the result of a pitch detection on a block of + * audio. + * + * It contains: + * + *
    + *
+ * <ul>
+ * <li>The pitch in Hertz.</li>
+ * <li>A probability (noisiness, (a)periodicity, salience, voicedness or clarity
+ * measure) for the detected pitch. This is somewhat similar to the term voiced
+ * which is used in speech recognition. This probability should be calculated
+ * together with the pitch. The exact meaning of the value depends on the detector used.</li>
+ * <li>A way to calculate the RMS of the signal.</li>
+ * <li>A boolean that indicates if the algorithm thinks the signal is pitched or
+ * not.</li>
+ * </ul>
+ *
+ * The separate pitched or unpitched boolean can coexist with a defined pitch.
+ * E.g. if the algorithm detects 220Hz in a noisy signal it may respond with
+ * 220Hz "unpitched".
+ *
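+ * <p>
+ * Illustrative use inside a {@link PitchDetectionHandler} (added example, not
+ * part of the original Javadoc; the 0.9 probability cut-off is arbitrary):
+ * </p>
+ * <pre>{@code
+ * public void handlePitch(PitchDetectionResult result, AudioEvent e) {
+ *     if (result.isPitched() && result.getProbability() > 0.9f) {
+ *         float hz = result.getPitch();
+ *         // ... react to hz
+ *     }
+ * }
+ * }</pre>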

+ * For performance reasons the object is reused. Please create a copy of the object + * if you want to use it on an other thread. + * + * + * @author Joren Six + */ +public class PitchDetectionResult { + /** + * The pitch in Hertz. + */ + private float pitch; + + private float probability; + + private boolean pitched; + + public PitchDetectionResult(){ + pitch = -1; + probability = -1; + pitched = false; + } + + /** + * A copy constructor. Since PitchDetectionResult objects are reused for performance reasons, creating a copy can be practical. + * @param other + */ + public PitchDetectionResult(PitchDetectionResult other){ + this.pitch = other.pitch; + this.probability = other.probability; + this.pitched = other.pitched; + } + + + /** + * @return The pitch in Hertz. + */ + public float getPitch() { + return pitch; + } + + public void setPitch(float pitch) { + this.pitch = pitch; + } + + /* (non-Javadoc) + * @see java.lang.Object#clone() + */ + public PitchDetectionResult clone(){ + return new PitchDetectionResult(this); + } + + /** + * @return A probability (noisiness, (a)periodicity, salience, voicedness or + * clarity measure) for the detected pitch. This is somewhat similar + * to the term voiced which is used in speech recognition. This + * probability should be calculated together with the pitch. The + * exact meaning of the value depends on the detector used. + */ + public float getProbability() { + return probability; + } + + public void setProbability(float probability) { + this.probability = probability; + } + + /** + * @return Whether the algorithm thinks the block of audio is pitched. Keep + * in mind that an algorithm can come up with a best guess for a + * pitch even when isPitched() is false. + */ + public boolean isPitched() { + return pitched; + } + + public void setPitched(boolean pitched) { + this.pitched = pitched; + } +} diff --git a/app/src/main/java/be/tarsos/dsp/pitch/PitchDetector.java b/app/src/main/java/be/tarsos/dsp/pitch/PitchDetector.java new file mode 100644 index 0000000..9d6975b --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/pitch/PitchDetector.java @@ -0,0 +1,46 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +package be.tarsos.dsp.pitch; + +/** + * A pitch detector is capable of analyzing a buffer with audio information + * and return a pitch estimation in Hz. + * + * @author Joren Six + */ +public interface PitchDetector { + /** + * Analyzes a buffer with audio information and estimates a pitch in Hz. + * Currently this interface only allows one pitch per buffer. + * + * @param audioBuffer + * The buffer with audio information. The information in the + * buffer is not modified so it can be (re)used for e.g. FFT + * analysis. + * @return An estimation of the pitch in Hz or -1 if no pitch is detected or + * present in the buffer. 
+ */ + PitchDetectionResult getPitch(final float[] audioBuffer); +} diff --git a/app/src/main/java/be/tarsos/dsp/pitch/PitchProcessor.java b/app/src/main/java/be/tarsos/dsp/pitch/PitchProcessor.java new file mode 100644 index 0000000..f3a6031 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/pitch/PitchProcessor.java @@ -0,0 +1,144 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +package be.tarsos.dsp.pitch; + +import be.tarsos.dsp.AudioEvent; +import be.tarsos.dsp.AudioProcessor; + +/** + * Is responsible to call a pitch estimation algorithm. It also calculates progress. + * The underlying pitch detection algorithm must implement the {@link PitchDetector} interface. + * @author Joren Six + */ +public class PitchProcessor implements AudioProcessor { + + /** + * A list of pitch estimation algorithms. + * @author Joren Six + */ + public enum PitchEstimationAlgorithm { + /** + * See {@link Yin} for the implementation. Or see the YIN article. + */ + YIN, + /** + * See {@link McLeodPitchMethod}. It is described in the article "A Smarter Way to Find Pitch". + */ + MPM, + /** + * A YIN implementation with a faster {@link FastYin} for the implementation. Or see the YIN article. + */ + FFT_YIN, + /** + * An implementation of a dynamic wavelet pitch detection algorithm (See + * {@link DynamicWavelet}), described in a paper by Eric Larson and Ross + * Maddox 'Real-Time Time-Domain Pitch Tracking Using Wavelets' + */ + DYNAMIC_WAVELET, + /** + * Returns the frequency of the FFT-bin with most energy. + */ + FFT_PITCH, + /** + * A pitch extractor that extracts the Average Magnitude Difference + * (AMDF) from an audio buffer. This is a good measure of the Pitch (f0) + * of a signal. + */ + AMDF; + + /** + * Returns a new instance of a pitch detector object based on the provided values. + * @param sampleRate The sample rate of the audio buffer. + * @param bufferSize The size (in samples) of the audio buffer. + * @return A new pitch detector object. + */ + public PitchDetector getDetector(float sampleRate,int bufferSize){ + PitchDetector detector; + if (this == MPM ) { + detector = new McLeodPitchMethod(sampleRate, bufferSize); + } else if(this == DYNAMIC_WAVELET ) { + detector = new DynamicWavelet(sampleRate,bufferSize); + } else if(this == FFT_YIN){ + detector = new FastYin(sampleRate, bufferSize); + } else if(this==AMDF){ + detector = new AMDF(sampleRate, bufferSize); + } else { + detector = new Yin(sampleRate, bufferSize); + } + return detector; + } + + } + + /** + * The underlying pitch detector; + */ + private final PitchDetector detector; + + private final PitchDetectionHandler handler; + + /** + * Initialize a new pitch processor. + * + * @param algorithm + * An enum defining the algorithm. + * @param sampleRate + * The sample rate of the buffer (Hz). + * @param bufferSize + * The size of the buffer in samples. 
+ * @param handler + * The handler handles detected pitch. + */ + public PitchProcessor(PitchEstimationAlgorithm algorithm, float sampleRate, + int bufferSize, + PitchDetectionHandler handler) { + detector = algorithm.getDetector(sampleRate, bufferSize); + this.handler = handler; + } + + @Override + public boolean process(AudioEvent audioEvent) { + float[] audioFloatBuffer = audioEvent.getFloatBuffer(); + + PitchDetectionResult result = detector.getPitch(audioFloatBuffer); + + + handler.handlePitch(result,audioEvent); + return true; + } + + @Override + public void processingFinished() { + } + + +} diff --git a/app/src/main/java/be/tarsos/dsp/pitch/Yin.java b/app/src/main/java/be/tarsos/dsp/pitch/Yin.java new file mode 100644 index 0000000..2bdadfd --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/pitch/Yin.java @@ -0,0 +1,273 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +package be.tarsos.dsp.pitch; + +/** + * An implementation of the AUBIO_YIN pitch tracking algorithm. See the YIN paper. Implementation based on aubio + * + * @author Joren Six + * @author Paul Brossier + */ +public final class Yin implements PitchDetector { + /** + * The default YIN threshold value. Should be around 0.10~0.15. See YIN + * paper for more information. + */ + private static final double DEFAULT_THRESHOLD = 0.20; + + /** + * The default size of an audio buffer (in samples). + */ + public static final int DEFAULT_BUFFER_SIZE = 2048; + + /** + * The default overlap of two consecutive audio buffers (in samples). + */ + public static final int DEFAULT_OVERLAP = 1536; + + /** + * The actual YIN threshold. + */ + private final double threshold; + + /** + * The audio sample rate. Most audio has a sample rate of 44.1kHz. + */ + private final float sampleRate; + + /** + * The buffer that stores the calculated values. It is exactly half the size + * of the input buffer. + */ + private final float[] yinBuffer; + + /** + * The result of the pitch detection iteration. + */ + private final PitchDetectionResult result; + + /** + * Create a new pitch detector for a stream with the defined sample rate. + * Processes the audio in blocks of the defined size. + * + * @param audioSampleRate + * The sample rate of the audio stream. E.g. 44.1 kHz. + * @param bufferSize + * The size of a buffer. E.g. 1024. + */ + public Yin(final float audioSampleRate, final int bufferSize) { + this(audioSampleRate, bufferSize, DEFAULT_THRESHOLD); + } + + /** + * Create a new pitch detector for a stream with the defined sample rate. + * Processes the audio in blocks of the defined size. + * + * @param audioSampleRate + * The sample rate of the audio stream. E.g. 44.1 kHz. + * @param bufferSize + * The size of a buffer. E.g. 1024. + * @param yinThreshold + * The parameter that defines which peaks are kept as possible + * pitch candidates. See the YIN paper for more details. 
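+ *
+ * <p>Illustrative values (added note, not part of the original Javadoc):
+ * {@code new Yin(44100f, 2048, 0.15)} builds a detector whose
+ * {@code getPitch(float[])} expects 2048-sample buffers and reports a pitch
+ * of -1 Hz when no pitch is found.</p>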
+ */ + public Yin(final float audioSampleRate, final int bufferSize, final double yinThreshold) { + this.sampleRate = audioSampleRate; + this.threshold = yinThreshold; + yinBuffer = new float[bufferSize / 2]; + result = new PitchDetectionResult(); + } + + /** + * The main flow of the YIN algorithm. Returns a pitch value in Hz or -1 if + * no pitch is detected. + * + * @return a pitch value in Hz or -1 if no pitch is detected. + */ + public PitchDetectionResult getPitch(final float[] audioBuffer) { + + final int tauEstimate; + final float pitchInHertz; + + // step 2 + difference(audioBuffer); + + // step 3 + cumulativeMeanNormalizedDifference(); + + // step 4 + tauEstimate = absoluteThreshold(); + + // step 5 + if (tauEstimate != -1) { + final float betterTau = parabolicInterpolation(tauEstimate); + + // step 6 + // TODO Implement optimization for the AUBIO_YIN algorithm. + // 0.77% => 0.5% error rate, + // using the data of the YIN paper + // bestLocalEstimate() + + // conversion to Hz + pitchInHertz = sampleRate / betterTau; + } else{ + // no pitch found + pitchInHertz = -1; + } + + result.setPitch(pitchInHertz); + + return result; + } + + /** + * Implements the difference function as described in step 2 of the YIN + * paper. + */ + private void difference(final float[] audioBuffer) { + int index, tau; + float delta; + for (tau = 0; tau < yinBuffer.length; tau++) { + yinBuffer[tau] = 0; + } + for (tau = 1; tau < yinBuffer.length; tau++) { + for (index = 0; index < yinBuffer.length; index++) { + delta = audioBuffer[index] - audioBuffer[index + tau]; + yinBuffer[tau] += delta * delta; + } + } + } + + /** + * The cumulative mean normalized difference function as described in step 3 + * of the YIN paper.
+ * + * yinBuffer[0] == yinBuffer[1] = 1 + * + */ + private void cumulativeMeanNormalizedDifference() { + int tau; + yinBuffer[0] = 1; + float runningSum = 0; + for (tau = 1; tau < yinBuffer.length; tau++) { + runningSum += yinBuffer[tau]; + yinBuffer[tau] *= tau / runningSum; + } + } + + /** + * Implements step 4 of the AUBIO_YIN paper. + */ + private int absoluteThreshold() { + // Uses another loop construct + // than the AUBIO implementation + int tau; + // first two positions in yinBuffer are always 1 + // So start at the third (index 2) + for (tau = 2; tau < yinBuffer.length; tau++) { + if (yinBuffer[tau] < threshold) { + while (tau + 1 < yinBuffer.length && yinBuffer[tau + 1] < yinBuffer[tau]) { + tau++; + } + // found tau, exit loop and return + // store the probability + // From the YIN paper: The threshold determines the list of + // candidates admitted to the set, and can be interpreted as the + // proportion of aperiodic power tolerated + // within a periodic signal. + // + // Since we want the periodicity and and not aperiodicity: + // periodicity = 1 - aperiodicity + result.setProbability(1 - yinBuffer[tau]); + break; + } + } + + + // if no pitch found, tau => -1 + if (tau == yinBuffer.length || yinBuffer[tau] >= threshold) { + tau = -1; + result.setProbability(0); + result.setPitched(false); + } else { + result.setPitched(true); + } + + return tau; + } + + /** + * Implements step 5 of the AUBIO_YIN paper. It refines the estimated tau + * value using parabolic interpolation. This is needed to detect higher + * frequencies more precisely. See http://fizyka.umk.pl/nrbook/c10-2.pdf and + * for more background + * http://fedc.wiwi.hu-berlin.de/xplore/tutorials/xegbohtmlnode62.html + * + * @param tauEstimate + * The estimated tau value. + * @return A better, more precise tau value. 
+ */ + private float parabolicInterpolation(final int tauEstimate) { + final float betterTau; + final int x0; + final int x2; + + if (tauEstimate < 1) { + x0 = tauEstimate; + } else { + x0 = tauEstimate - 1; + } + if (tauEstimate + 1 < yinBuffer.length) { + x2 = tauEstimate + 1; + } else { + x2 = tauEstimate; + } + if (x0 == tauEstimate) { + if (yinBuffer[tauEstimate] <= yinBuffer[x2]) { + betterTau = tauEstimate; + } else { + betterTau = x2; + } + } else if (x2 == tauEstimate) { + if (yinBuffer[tauEstimate] <= yinBuffer[x0]) { + betterTau = tauEstimate; + } else { + betterTau = x0; + } + } else { + float s0, s1, s2; + s0 = yinBuffer[x0]; + s1 = yinBuffer[tauEstimate]; + s2 = yinBuffer[x2]; + // fixed AUBIO implementation, thanks to Karl Helgason: + // (2.0f * s1 - s2 - s0) was incorrectly multiplied with -1 + betterTau = tauEstimate + (s2 - s0) / (2 * (2 * s1 - s2 - s0)); + } + return betterTau; + } +} diff --git a/app/src/main/java/be/tarsos/dsp/pitch/package-info.java b/app/src/main/java/be/tarsos/dsp/pitch/package-info.java new file mode 100644 index 0000000..c93e39d --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/pitch/package-info.java @@ -0,0 +1,28 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +/** + * Signal processing methods for pitch estimation. + */ +package be.tarsos.dsp.pitch; diff --git a/app/src/main/java/be/tarsos/dsp/resample/FilterKit.java b/app/src/main/java/be/tarsos/dsp/resample/FilterKit.java new file mode 100644 index 0000000..b06403a --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/resample/FilterKit.java @@ -0,0 +1,263 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +/****************************************************************************** + * + * libresample4j + * Copyright (c) 2009 Laszlo Systems, Inc. All Rights Reserved. + * + * libresample4j is a Java port of Dominic Mazzoni's libresample 0.1.3, + * which is in turn based on Julius Smith's Resample 1.7 library. 
+ * http://www-ccrma.stanford.edu/~jos/resample/ + * + * License: LGPL -- see the file LICENSE.txt for more information + * + *****************************************************************************/ +package be.tarsos.dsp.resample; + +/** + * This file provides Kaiser-windowed low-pass filter support, + * including a function to create the filter coefficients, and + * two functions to apply the filter at a particular point. + * + *

+ * <pre>
+ * reference: "Digital Filters, 2nd edition"
+ *            R.W. Hamming, pp. 178-179
+ *
+ * Izero() computes the 0th order modified bessel function of the first kind.
+ *    (Needed to compute Kaiser window).
+ *
+ * LpFilter() computes the coeffs of a Kaiser-windowed low pass filter with
+ *    the following characteristics:
+ *
+ *       c[]  = array in which to store computed coeffs
+ *       frq  = roll-off frequency of filter
+ *       N    = Half the window length in number of coeffs
+ *       Beta = parameter of Kaiser window
+ *       Num  = number of coeffs before 1/frq
+ *
+ * Beta trades the rejection of the lowpass filter against the transition
+ *    width from passband to stopband.  Larger Beta means a slower
+ *    transition and greater stopband rejection.  See Rabiner and Gold
+ *    (Theory and Application of DSP) under Kaiser windows for more about
+ *    Beta.  The following table from Rabiner and Gold gives some feel
+ *    for the effect of Beta:
+ *
+ * All ripples in dB, width of transition band = D*N where N = window length
+ *
+ *               BETA    D       PB RIP   SB RIP
+ *               2.120   1.50  +-0.27      -30
+ *               3.384   2.23    0.0864    -40
+ *               4.538   2.93    0.0274    -50
+ *               5.658   3.62    0.00868   -60
+ *               6.764   4.32    0.00275   -70
+ *               7.865   5.0     0.000868  -80
+ *               8.960   5.7     0.000275  -90
+ *               10.056  6.4     0.000087  -100
+ * </pre>
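+ *
+ * <p>An illustrative call, using the values that the Resampler in this same
+ * patch passes in (added note, not part of the original header; {@code nWing}
+ * here stands for the wing length, Npc * (Nmult - 1) / 2):</p>
+ * <pre>{@code
+ * double[] coeffs = new double[nWing];
+ * FilterKit.lrsLpFilter(coeffs, nWing, 0.5 * 0.90, 6.0, 4096); // frq, Beta, Num
+ * }</pre>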
+ */ +class FilterKit { + + // Max error acceptable in Izero + private static final double IzeroEPSILON = 1E-21; + + private static double Izero(double x) { + double sum, u, halfx, temp; + int n; + + sum = u = n = 1; + halfx = x / 2.0; + do { + temp = halfx / (double) n; + n += 1; + temp *= temp; + u *= temp; + sum += u; + } while (u >= IzeroEPSILON * sum); + return (sum); + } + + public static void lrsLpFilter(double[] c, int N, double frq, double Beta, int Num) { + double IBeta, temp, temp1, inm1; + int i; + + // Calculate ideal lowpass filter impulse response coefficients: + c[0] = 2.0 * frq; + for (i = 1; i < N; i++) { + temp = Math.PI * (double) i / (double) Num; + c[i] = Math.sin(2.0 * temp * frq) / temp; // Analog sinc function, + // cutoff = frq + } + + /* + * Calculate and Apply Kaiser window to ideal lowpass filter. Note: last + * window value is IBeta which is NOT zero. You're supposed to really + * truncate the window here, not ramp it to zero. This helps reduce the + * first sidelobe. + */ + IBeta = 1.0 / Izero(Beta); + inm1 = 1.0 / ((double) (N - 1)); + for (i = 1; i < N; i++) { + temp = (double) i * inm1; + temp1 = 1.0 - temp * temp; + temp1 = (temp1 < 0 ? 0 : temp1); /* + * make sure it's not negative + * since we're taking the square + * root - this happens on Pentium + * 4's due to tiny roundoff errors + */ + c[i] *= Izero(Beta * Math.sqrt(temp1)) * IBeta; + } + } + + /** + * + * @param Imp impulse response + * @param ImpD impulse response deltas + * @param Nwing length of one wing of filter + * @param Interp Interpolate coefs using deltas? + * @param Xp_array Current sample array + * @param Xp_index Current sample index + * @param Ph Phase + * @param Inc increment (1 for right wing or -1 for left) + * @return v. + */ + public static float lrsFilterUp(float[] Imp, float[] ImpD, int Nwing, boolean Interp, float[] Xp_array, int Xp_index, double Ph, + int Inc) { + double a = 0; + float v, t; + + Ph *= Resampler.Npc; // Npc is number of values per 1/delta in impulse + // response + + v = 0.0f; // The output value + + float[] Hp_array = Imp; + int Hp_index = (int) Ph; + + int End_index = Nwing; + + float[] Hdp_array = ImpD; + int Hdp_index = (int) Ph; + + if (Interp) { + // Hdp = &ImpD[(int)Ph]; + a = Ph - Math.floor(Ph); /* fractional part of Phase */ + } + + if (Inc == 1) // If doing right wing... + { // ...drop extra coeff, so when Ph is + End_index--; // 0.5, we don't do too many mult's + if (Ph == 0) // If the phase is zero... + { // ...then we've already skipped the + Hp_index += Resampler.Npc; // first sample, so we must also + Hdp_index += Resampler.Npc; // skip ahead in Imp[] and ImpD[] + } + } + + if (Interp) + while (Hp_index < End_index) { + t = Hp_array[Hp_index]; /* Get filter coeff */ + t += Hdp_array[Hdp_index] * a; /* t is now interp'd filter coeff */ + Hdp_index += Resampler.Npc; /* Filter coeff differences step */ + t *= Xp_array[Xp_index]; /* Mult coeff by input sample */ + v += t; /* The filter output */ + Hp_index += Resampler.Npc; /* Filter coeff step */ + Xp_index += Inc; /* Input signal step. NO CHECK ON BOUNDS */ + } + else + while (Hp_index < End_index) { + t = Hp_array[Hp_index]; /* Get filter coeff */ + t *= Xp_array[Xp_index]; /* Mult coeff by input sample */ + v += t; /* The filter output */ + Hp_index += Resampler.Npc; /* Filter coeff step */ + Xp_index += Inc; /* Input signal step. 
NO CHECK ON BOUNDS */ + } + + return v; + } + + /** + * + * @param Imp impulse response + * @param ImpD impulse response deltas + * @param Nwing length of one wing of filter + * @param Interp Interpolate coefs using deltas? + * @param Xp_array Current sample array + * @param Xp_index Current sample index + * @param Ph Phase + * @param Inc increment (1 for right wing or -1 for left) + * @param dhb filter sampling period + * @return v. + */ + public static float lrsFilterUD(float[] Imp, float[] ImpD, int Nwing, boolean Interp, float[] Xp_array, int Xp_index, double Ph, + int Inc, double dhb) { + float a; + float v, t; + double Ho; + + v = 0.0f; // The output value + Ho = Ph * dhb; + + int End_index = Nwing; + + if (Inc == 1) // If doing right wing... + { // ...drop extra coeff, so when Ph is + End_index--; // 0.5, we don't do too many mult's + if (Ph == 0) // If the phase is zero... + Ho += dhb; // ...then we've already skipped the + } // first sample, so we must also + // skip ahead in Imp[] and ImpD[] + + float[] Hp_array = Imp; + int Hp_index; + + if (Interp) { + float[] Hdp_array = ImpD; + int Hdp_index; + + while ((Hp_index = (int) Ho) < End_index) { + t = Hp_array[Hp_index]; // Get IR sample + Hdp_index = (int) Ho; // get interp bits from diff table + a = (float) (Ho - Math.floor(Ho)); // a is logically between 0 + // and 1 + t += Hdp_array[Hdp_index] * a; // t is now interp'd filter coeff + t *= Xp_array[Xp_index]; // Mult coeff by input sample + v += t; // The filter output + Ho += dhb; // IR step + Xp_index += Inc; // Input signal step. NO CHECK ON BOUNDS + } + } else { + while ((Hp_index = (int) Ho) < End_index) { + t = Hp_array[Hp_index]; // Get IR sample + t *= Xp_array[Xp_index]; // Mult coeff by input sample + v += t; // The filter output + Ho += dhb; // IR step + Xp_index += Inc; // Input signal step. NO CHECK ON BOUNDS + } + } + + return v; + } + +} diff --git a/app/src/main/java/be/tarsos/dsp/resample/RateTransposer.java b/app/src/main/java/be/tarsos/dsp/resample/RateTransposer.java new file mode 100644 index 0000000..e25aaec --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/resample/RateTransposer.java @@ -0,0 +1,85 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +package be.tarsos.dsp.resample; + +import be.tarsos.dsp.AudioEvent; +import be.tarsos.dsp.AudioProcessor; + + +/** + * Sample rate transposer. Changes sample rate by using interpolation + * + * Together with the time stretcher this can be used for pitch shifting. + * @author Joren Six + */ +public class RateTransposer implements AudioProcessor { + + private double factor; + private final Resampler r; + + /** + * Create a new sample rate transposer. The factor determines the new sample + * rate. E.g. 0.5 is half the sample rate, 1.0 does not change a thing and + * 2.0 doubles the samplerate. 
If the samples are played at the original + * speed the pitch doubles (0.5), does not change (1.0) or halves (0.5) + * respectively. Playback length follows the same rules, obviously. + * + * @param factor + * Determines the new sample rate. E.g. 0.5 is half the sample + * rate, 1.0 does not change a thing and 2.0 doubles the sample + * rate. If the samples are played at the original speed the + * pitch doubles (0.5), does not change (1.0) or halves (0.5) + * respectively. Playback length follows the same rules, + * obviously. + */ + public RateTransposer(double factor){ + this.factor = factor; + r= new Resampler(false,0.1,4.0); + } + + public void setFactor(double tempo){ + this.factor = tempo; + } + + @Override + public boolean process(AudioEvent audioEvent) { + float[] src = audioEvent.getFloatBuffer(); + //Creation of float array in loop could be prevented if src.length is known beforehand... + //Possible optimization is to instantiate it outside the loop and get a pointer to the + //array here, in the process method method. + float[] out = new float[(int) (src.length * factor)]; + r.process(factor, src, 0, src.length, false, out, 0, out.length); + //The size of the output buffer changes (according to factor). + audioEvent.setFloatBuffer(out); + //Update overlap offset to match new buffer size + audioEvent.setOverlap((int) (audioEvent.getOverlap() * factor)); + return true; + } + + @Override + public void processingFinished() { + + } + +} diff --git a/app/src/main/java/be/tarsos/dsp/resample/Resampler.java b/app/src/main/java/be/tarsos/dsp/resample/Resampler.java new file mode 100644 index 0000000..b1d1ac4 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/resample/Resampler.java @@ -0,0 +1,474 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +/****************************************************************************** + * + * libresample4j + * Copyright (c) 2009 Laszlo Systems, Inc. All Rights Reserved. + * + * libresample4j is a Java port of Dominic Mazzoni's libresample 0.1.3, + * which is in turn based on Julius Smith's Resample 1.7 library. 
+ * http://www-ccrma.stanford.edu/~jos/resample/ + * + * License: LGPL -- see the file LICENSE.txt for more information + * + *****************************************************************************/ +package be.tarsos.dsp.resample; + +import java.nio.FloatBuffer; + +public class Resampler { + + public static class Result { + public final int inputSamplesConsumed; + public final int outputSamplesGenerated; + + public Result(int inputSamplesConsumed, int outputSamplesGenerated) { + this.inputSamplesConsumed = inputSamplesConsumed; + this.outputSamplesGenerated = outputSamplesGenerated; + } + } + + // number of values per 1/delta in impulse response + protected static final int Npc = 4096; + + private final float[] Imp; + private final float[] ImpD; + private final float LpScl; + private final int Nmult; + private final int Nwing; + private final double minFactor; + private final double maxFactor; + private final int XSize; + private final float[] X; + private int Xp; // Current "now"-sample pointer for input + private int Xread; // Position to put new samples + private final int Xoff; + private final float[] Y; + private int Yp; + private double Time; + + /** + * Clone an existing resampling session. Faster than creating one from scratch. + * + * @param other + */ + public Resampler(Resampler other) { + this.Imp = other.Imp.clone(); + this.ImpD = other.ImpD.clone(); + this.LpScl = other.LpScl; + this.Nmult = other.Nmult; + this.Nwing = other.Nwing; + this.minFactor = other.minFactor; + this.maxFactor = other.maxFactor; + this.XSize = other.XSize; + this.X = other.X.clone(); + this.Xp = other.Xp; + this.Xread = other.Xread; + this.Xoff = other.Xoff; + this.Y = other.Y.clone(); + this.Yp = other.Yp; + this.Time = other.Time; + } + + /** + * Create a new resampling session. + * + * @param highQuality true for better quality, slower processing time + * @param minFactor lower bound on resampling factor for this session + * @param maxFactor upper bound on resampling factor for this session + * @throws IllegalArgumentException if minFactor or maxFactor is not + * positive, or if maxFactor is less than minFactor + */ + public Resampler(boolean highQuality, double minFactor, double maxFactor) { + if (minFactor <= 0.0 || maxFactor <= 0.0) { + throw new IllegalArgumentException("minFactor and maxFactor must be positive"); + } + if (maxFactor < minFactor) { + throw new IllegalArgumentException("minFactor must be <= maxFactor"); + } + + this.minFactor = minFactor; + this.maxFactor = maxFactor; + this.Nmult = highQuality ? 35 : 11; + this.LpScl = 1.0f; + this.Nwing = Npc * (this.Nmult - 1) / 2; // # of filter coeffs in right wing + + double Rolloff = 0.90; + double Beta = 6; + + double[] Imp64 = new double[this.Nwing]; + + FilterKit.lrsLpFilter(Imp64, this.Nwing, 0.5 * Rolloff, Beta, Npc); + this.Imp = new float[this.Nwing]; + this.ImpD = new float[this.Nwing]; + + for (int i = 0; i < this.Nwing; i++) { + this.Imp[i] = (float) Imp64[i]; + } + + // Storing deltas in ImpD makes linear interpolation + // of the filter coefficients faster + for (int i = 0; i < this.Nwing - 1; i++) { + this.ImpD[i] = this.Imp[i + 1] - this.Imp[i]; + } + + // Last coeff. 
not interpolated + this.ImpD[this.Nwing - 1] = -this.Imp[this.Nwing - 1]; + + // Calc reach of LP filter wing (plus some creeping room) + int Xoff_min = (int) (((this.Nmult + 1) / 2.0) * Math.max(1.0, 1.0 / minFactor) + 10); + int Xoff_max = (int) (((this.Nmult + 1) / 2.0) * Math.max(1.0, 1.0 / maxFactor) + 10); + this.Xoff = Math.max(Xoff_min, Xoff_max); + + // Make the inBuffer size at least 4096, but larger if necessary + // in order to store the minimum reach of the LP filter and then some. + // Then allocate the buffer an extra Xoff larger so that + // we can zero-pad up to Xoff zeros at the end when we reach the + // end of the input samples. + this.XSize = Math.max(2 * this.Xoff + 10, 4096); + this.X = new float[this.XSize + this.Xoff]; + this.Xp = this.Xoff; + this.Xread = this.Xoff; + + // Make the outBuffer long enough to hold the entire processed + // output of one inBuffer + int YSize = (int) (((double) this.XSize) * maxFactor + 2.0); + this.Y = new float[YSize]; + this.Yp = 0; + + this.Time = (double) this.Xoff; // Current-time pointer for converter + } + + public int getFilterWidth() { + return this.Xoff; + } + + /** + * Process a batch of samples. There is no guarantee that the input buffer will be drained. + * + * @param factor factor at which to resample this batch + * @param buffers sample buffer for producing input and consuming output + * @param lastBatch true if this is known to be the last batch of samples + * @return true iff resampling is complete (ie. no input samples consumed and no output samples produced) + */ + public boolean process(double factor, SampleBuffers buffers, boolean lastBatch) { + if (factor < this.minFactor || factor > this.maxFactor) { + throw new IllegalArgumentException("factor " + factor + " is not between minFactor=" + minFactor + + " and maxFactor=" + maxFactor); + } + + int outBufferLen = buffers.getOutputBufferLength(); + int inBufferLen = buffers.getInputBufferLength(); + + float[] Imp = this.Imp; + float[] ImpD = this.ImpD; + float LpScl = this.LpScl; + int Nwing = this.Nwing; + boolean interpFilt = false; // TRUE means interpolate filter coeffs + + int inBufferUsed = 0; + int outSampleCount = 0; + + // Start by copying any samples still in the Y buffer to the output + // buffer + if ((this.Yp != 0) && (outBufferLen - outSampleCount) > 0) { + int len = Math.min(outBufferLen - outSampleCount, this.Yp); + + buffers.consumeOutput(this.Y, 0, len); + //for (int i = 0; i < len; i++) { + // outBuffer[outBufferOffset + outSampleCount + i] = this.Y[i]; + //} + + outSampleCount += len; + for (int i = 0; i < this.Yp - len; i++) { + this.Y[i] = this.Y[i + len]; + } + this.Yp -= len; + } + + // If there are still output samples left, return now - we need + // the full output buffer available to us... 
+ if (this.Yp != 0) { + return inBufferUsed == 0 && outSampleCount == 0; + } + + // Account for increased filter gain when using factors less than 1 + if (factor < 1) { + LpScl = (float) (LpScl * factor); + } + + while (true) { + + // This is the maximum number of samples we can process + // per loop iteration + + /* + * #ifdef DEBUG + * printf("XSize: %d Xoff: %d Xread: %d Xp: %d lastFlag: %d\n", + * this.XSize, this.Xoff, this.Xread, this.Xp, lastFlag); #endif + */ + + // Copy as many samples as we can from the input buffer into X + int len = this.XSize - this.Xread; + + if (len >= inBufferLen - inBufferUsed) { + len = inBufferLen - inBufferUsed; + } + + buffers.produceInput(this.X, this.Xread, len); + //for (int i = 0; i < len; i++) { + // this.X[this.Xread + i] = inBuffer[inBufferOffset + inBufferUsed + i]; + //} + + inBufferUsed += len; + this.Xread += len; + + int Nx; + if (lastBatch && (inBufferUsed == inBufferLen)) { + // If these are the last samples, zero-pad the + // end of the input buffer and make sure we process + // all the way to the end + Nx = this.Xread - this.Xoff; + for (int i = 0; i < this.Xoff; i++) { + this.X[this.Xread + i] = 0; + } + } else { + Nx = this.Xread - 2 * this.Xoff; + } + + /* + * #ifdef DEBUG fprintf(stderr, "new len=%d Nx=%d\n", len, Nx); + * #endif + */ + + if (Nx <= 0) { + break; + } + + // Resample stuff in input buffer + int Nout; + if (factor >= 1) { // SrcUp() is faster if we can use it */ + Nout = lrsSrcUp(this.X, this.Y, factor, /* &this.Time, */Nx, Nwing, LpScl, Imp, ImpD, interpFilt); + } else { + Nout = lrsSrcUD(this.X, this.Y, factor, /* &this.Time, */Nx, Nwing, LpScl, Imp, ImpD, interpFilt); + } + + /* + * #ifdef DEBUG + * printf("Nout: %d\n", Nout); + * #endif + */ + + this.Time -= Nx; // Move converter Nx samples back in time + this.Xp += Nx; // Advance by number of samples processed + + // Calc time accumulation in Time + int Ncreep = (int) (this.Time) - this.Xoff; + if (Ncreep != 0) { + this.Time -= Ncreep; // Remove time accumulation + this.Xp += Ncreep; // and add it to read pointer + } + + // Copy part of input signal that must be re-used + int Nreuse = this.Xread - (this.Xp - this.Xoff); + + for (int i = 0; i < Nreuse; i++) { + this.X[i] = this.X[i + (this.Xp - this.Xoff)]; + } + + /* + #ifdef DEBUG + printf("New Xread=%d\n", Nreuse); + #endif */ + + this.Xread = Nreuse; // Pos in input buff to read new data into + this.Xp = this.Xoff; + + this.Yp = Nout; + + // Copy as many samples as possible to the output buffer + if (this.Yp != 0 && (outBufferLen - outSampleCount) > 0) { + len = Math.min(outBufferLen - outSampleCount, this.Yp); + + buffers.consumeOutput(this.Y, 0, len); + //for (int i = 0; i < len; i++) { + // outBuffer[outBufferOffset + outSampleCount + i] = this.Y[i]; + //} + + outSampleCount += len; + for (int i = 0; i < this.Yp - len; i++) { + this.Y[i] = this.Y[i + len]; + } + this.Yp -= len; + } + + // If there are still output samples left, return now, + // since we need the full output buffer available + if (this.Yp != 0) { + break; + } + } + + return inBufferUsed == 0 && outSampleCount == 0; + } + + /** + * Process a batch of samples. Convenience method for when the input and output are both floats. + * + * @param factor factor at which to resample this batch + * @param inputBuffer contains input samples in the range -1.0 to 1.0 + * @param outputBuffer output samples will be deposited here + * @param lastBatch true if this is known to be the last batch of samples + * @return true iff resampling is complete (ie. 
no input samples consumed and no output samples produced) + */ + public boolean process(double factor, final FloatBuffer inputBuffer, boolean lastBatch, final FloatBuffer outputBuffer) { + SampleBuffers sampleBuffers = new SampleBuffers() { + public int getInputBufferLength() { + return inputBuffer.remaining(); + } + + public int getOutputBufferLength() { + return outputBuffer.remaining(); + } + + public void produceInput(float[] array, int offset, int length) { + inputBuffer.get(array, offset, length); + } + + public void consumeOutput(float[] array, int offset, int length) { + outputBuffer.put(array, offset, length); + } + }; + return process(factor, sampleBuffers, lastBatch); + } + + /** + * Process a batch of samples. Alternative interface if you prefer to work with arrays. + * + * @param factor resampling rate for this batch + * @param inBuffer array containing input samples in the range -1.0 to 1.0 + * @param inBufferOffset offset into inBuffer at which to start processing + * @param inBufferLen number of valid elements in the inputBuffer + * @param lastBatch pass true if this is the last batch of samples + * @param outBuffer array to hold the resampled data + * @param outBufferOffset Offset in the output buffer. + * @param outBufferLen Output buffer length. + * @return the number of samples consumed and generated + */ + public Result process(double factor, float[] inBuffer, int inBufferOffset, int inBufferLen, boolean lastBatch, float[] outBuffer, int outBufferOffset, int outBufferLen) { + FloatBuffer inputBuffer = FloatBuffer.wrap(inBuffer, inBufferOffset, inBufferLen); + FloatBuffer outputBuffer = FloatBuffer.wrap(outBuffer, outBufferOffset, outBufferLen); + + process(factor, inputBuffer, lastBatch, outputBuffer); + + return new Result(inputBuffer.position() - inBufferOffset, outputBuffer.position() - outBufferOffset); + } + + + + /* + * Sampling rate up-conversion only subroutine; Slightly faster than + * down-conversion; + */ + private int lrsSrcUp(float[] X, float[] Y, double factor, int Nx, int Nwing, float LpScl, float[] Imp, + float[] ImpD, boolean Interp) { + + float[] Xp_array = X; + int Xp_index; + + float[] Yp_array = Y; + int Yp_index = 0; + + float v; + + double CurrentTime = this.Time; + double dt; // Step through input signal + double endTime; // When Time reaches EndTime, return to user + + dt = 1.0 / factor; // Output sampling period + + endTime = CurrentTime + Nx; + while (CurrentTime < endTime) { + double LeftPhase = CurrentTime - Math.floor(CurrentTime); + double RightPhase = 1.0 - LeftPhase; + + Xp_index = (int) CurrentTime; // Ptr to current input sample + // Perform left-wing inner product + v = FilterKit.lrsFilterUp(Imp, ImpD, Nwing, Interp, Xp_array, Xp_index++, LeftPhase, -1); + // Perform right-wing inner product + v += FilterKit.lrsFilterUp(Imp, ImpD, Nwing, Interp, Xp_array, Xp_index, RightPhase, 1); + + v *= LpScl; // Normalize for unity filter gain + + Yp_array[Yp_index++] = v; // Deposit output + CurrentTime += dt; // Move to next sample by time increment + } + + this.Time = CurrentTime; + return Yp_index; // Return the number of output samples + } + + private int lrsSrcUD(float[] X, float[] Y, double factor, int Nx, int Nwing, float LpScl, float[] Imp, + float[] ImpD, boolean Interp) { + + float[] Xp_array = X; + int Xp_index; + + float[] Yp_array = Y; + int Yp_index = 0; + + float v; + + double CurrentTime = this.Time; + double dh; // Step through filter impulse response + double dt; // Step through input signal + double endTime; // When Time 
reaches EndTime, return to user + + dt = 1.0 / factor; // Output sampling period + + dh = Math.min(Npc, factor * Npc); // Filter sampling period + + endTime = CurrentTime + Nx; + while (CurrentTime < endTime) { + double LeftPhase = CurrentTime - Math.floor(CurrentTime); + double RightPhase = 1.0 - LeftPhase; + + Xp_index = (int) CurrentTime; // Ptr to current input sample + // Perform left-wing inner product + v = FilterKit.lrsFilterUD(Imp, ImpD, Nwing, Interp, Xp_array, Xp_index++, LeftPhase, -1, dh); + // Perform right-wing inner product + v += FilterKit.lrsFilterUD(Imp, ImpD, Nwing, Interp, Xp_array, Xp_index, RightPhase, 1, dh); + + v *= LpScl; // Normalize for unity filter gain + + Yp_array[Yp_index++] = v; // Deposit output + + CurrentTime += dt; // Move to next sample by time increment + } + + this.Time = CurrentTime; + return Yp_index; // Return the number of output samples + } + +} diff --git a/app/src/main/java/be/tarsos/dsp/resample/SampleBuffers.java b/app/src/main/java/be/tarsos/dsp/resample/SampleBuffers.java new file mode 100644 index 0000000..c75b1e7 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/resample/SampleBuffers.java @@ -0,0 +1,72 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +/****************************************************************************** + * + * libresample4j + * Copyright (c) 2009 Laszlo Systems, Inc. All Rights Reserved. + * + * libresample4j is a Java port of Dominic Mazzoni's libresample 0.1.3, + * which is in turn based on Julius Smith's Resample 1.7 library. + * http://www-ccrma.stanford.edu/~jos/resample/ + * + * License: LGPL -- see the file LICENSE.txt for more information + * + *****************************************************************************/ +package be.tarsos.dsp.resample; + +/** + * Callback for producing and consuming samples. Enables on-the-fly conversion between sample types + * (signed 16-bit integers to floats, for example) and/or writing directly to an output stream. + */ +interface SampleBuffers { + /** + * @return number of input samples available + */ + + int getInputBufferLength(); + + /** + * @return number of samples the output buffer has room for + */ + int getOutputBufferLength(); + + /** + * Copy length samples from the input buffer to the given array, starting at the given offset. + * Samples should be in the range -1.0f to 1.0f. + * + * @param array array to hold samples from the input buffer + * @param offset start writing samples here + * @param length write this many samples + */ + void produceInput(float[] array, int offset, int length); + + /** + * Copy length samples from the given array to the output buffer, starting at the given offset. 
+ * + * @param array array to read from + * @param offset start reading samples here + * @param length read this many samples + */ + void consumeOutput(float[] array, int offset, int length); +} diff --git a/app/src/main/java/be/tarsos/dsp/resample/SoundTouchRateTransposer.java b/app/src/main/java/be/tarsos/dsp/resample/SoundTouchRateTransposer.java new file mode 100644 index 0000000..7df1bef --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/resample/SoundTouchRateTransposer.java @@ -0,0 +1,95 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +package be.tarsos.dsp.resample; + +import be.tarsos.dsp.AudioDispatcher; +import be.tarsos.dsp.AudioEvent; +import be.tarsos.dsp.AudioProcessor; + +/** + * Currently not working sample rate transposer, works only for integer factors. + * Changes sample rate by using linear interpolation. + * + * Together with the time stretcher this can be used for pitch shifting. + * @author Joren Six + * @author Olli Parviainen + */ +public class SoundTouchRateTransposer implements AudioProcessor { + + private final double rate; + int slopeCount; + double prevSample; + private AudioDispatcher dispatcher; + + public void setDispatcher(AudioDispatcher newDispatcher){ + this.dispatcher = newDispatcher; + } + + public SoundTouchRateTransposer(double d){ + this.rate = d; + } + + @Override + public boolean process(AudioEvent audioEvent) { + int i, used; + float[] src = audioEvent.getFloatBuffer(); + float[] dest = new float[(int) Math.round(audioEvent.getBufferSize() / rate)]; + used = 0; + i = 0; + + // Process the last sample saved from the previous call first... 
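The loops that follow derive each output sample by linear interpolation between two neighbouring input samples. As a standalone illustration (this helper is not part of the file), the per-sample step amounts to:

// out = (1 - frac) * src[n] + frac * src[n + 1], with frac in [0, 1)
static float lerp(float previous, float next, float frac) {
    return (1.0f - frac) * previous + frac * next;
}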
+ while (slopeCount <= 1.0f) { + dest[i] = (float)((1.0f - slopeCount) * prevSample + slopeCount * src[0]); + i++; + slopeCount += rate; + } + slopeCount -= 1.0f; + end: + while(true){ + while (slopeCount > 1.0f) { + slopeCount -= 1.0f; + used++; + if (used >= src.length - 1) + break end; + } + if(i < dest.length){ + dest[i] = (float)((1.0f - slopeCount) * src[used] + slopeCount * src[used + 1]); + } + i++; + slopeCount += rate; + } + + //Store the last sample for the next round + prevSample = src[src.length - 1]; + dispatcher.setStepSizeAndOverlap(dest.length, 0); + audioEvent.setFloatBuffer(dest); + return true; + } + + @Override + public void processingFinished() { + + } + +} diff --git a/app/src/main/java/be/tarsos/dsp/resample/package-info.java b/app/src/main/java/be/tarsos/dsp/resample/package-info.java new file mode 100644 index 0000000..c223f43 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/resample/package-info.java @@ -0,0 +1,28 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +/** + * A package with everything needed to resample audio. + */ +package be.tarsos.dsp.resample; diff --git a/app/src/main/java/be/tarsos/dsp/synthesis/AmplitudeLFO.java b/app/src/main/java/be/tarsos/dsp/synthesis/AmplitudeLFO.java new file mode 100644 index 0000000..31c63e0 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/synthesis/AmplitudeLFO.java @@ -0,0 +1,76 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +package be.tarsos.dsp.synthesis; + +import be.tarsos.dsp.AudioEvent; +import be.tarsos.dsp.AudioProcessor; + +/** + * A low frequency oscillator to change the amplitude of a signal. + */ +public class AmplitudeLFO implements AudioProcessor { + + private final double frequency; + private final double scaleParameter; + private double phase; + + /** + * Create a new low frequency oscillator with a default frequency (1.5Hz and scale 0.75) + */ + public AmplitudeLFO(){ + this(1.5,0.75); + } + + /** + * Create a new low frequency oscillator + * @param frequency The frequency in Hz + * @param scaleParameter The scale between 0 and 1 to modify the amplitude. 
+ */ + public AmplitudeLFO(double frequency, double scaleParameter){ + this.frequency = frequency; + this.scaleParameter = scaleParameter; + phase = 0; + } + + + @Override + public boolean process(AudioEvent audioEvent) { + float[] buffer = audioEvent.getFloatBuffer(); + double sampleRate = audioEvent.getSampleRate(); + double twoPiF = 2 * Math.PI * frequency; + double time = 0; + for(int i = 0 ; i < buffer.length ; i++){ + time = i / sampleRate; + float gain = (float) (scaleParameter * Math.sin(twoPiF * time + phase)); + buffer[i] = gain * buffer[i]; + } + phase = twoPiF * buffer.length / sampleRate + phase; + return true; + } + + @Override + public void processingFinished() { + } + +} diff --git a/app/src/main/java/be/tarsos/dsp/synthesis/NoiseGenerator.java b/app/src/main/java/be/tarsos/dsp/synthesis/NoiseGenerator.java new file mode 100644 index 0000000..f147b53 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/synthesis/NoiseGenerator.java @@ -0,0 +1,56 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +package be.tarsos.dsp.synthesis; + +import be.tarsos.dsp.AudioEvent; +import be.tarsos.dsp.AudioProcessor; + +public class NoiseGenerator implements AudioProcessor{ + + private final double gain; + + public NoiseGenerator(){ + this(1.0); + } + + public NoiseGenerator(double gain){ + this.gain = gain; + } + + @Override + public boolean process(AudioEvent audioEvent) { + float[] buffer = audioEvent.getFloatBuffer(); + for(int i = 0 ; i < buffer.length ; i++){ + buffer[i] += (float) (Math.random() * gain); + } + return true; + } + + @Override + public void processingFinished() { + } + + + +} diff --git a/app/src/main/java/be/tarsos/dsp/synthesis/PitchResyntheziser.java b/app/src/main/java/be/tarsos/dsp/synthesis/PitchResyntheziser.java new file mode 100644 index 0000000..494b92b --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/synthesis/PitchResyntheziser.java @@ -0,0 +1,137 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. 
+* +*/ + +package be.tarsos.dsp.synthesis; + +import java.util.Arrays; + +import be.tarsos.dsp.AudioEvent; +import be.tarsos.dsp.EnvelopeFollower; +import be.tarsos.dsp.pitch.PitchDetectionHandler; +import be.tarsos.dsp.pitch.PitchDetectionResult; + +/** + * This pitch detection handler replaces the audio buffer in the pipeline with a + * synthesized wave. It either follows the envelope of the original signal or + * not. Use it wisely. The following demonstrates how it can be used. + * + *
+ * 
+ * PitchEstimationAlgorithm algo = PitchEstimationAlgorithm.FFT_YIN;
+ * PitchResyntheziser prs = new PitchResyntheziser(samplerate);
+ * AudioDispatcher dispatcher = AudioDispatcher.fromFile(new File("in.wav"),1024, 0);
+ * //Handle pitch detection
+ * dispatcher.addAudioProcessor(new PitchProcessor(algo, samplerate, size, prs));
+ * //Write the synthesized pitch to an output file.
+ * dispatcher.addAudioProcessor(new WaveformWriter(format, "out.wav"));
+ * dispatcher.run();
+ * 
+ * 
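The snippet above uses samplerate, size and format without declaring them; a plausible completion, with purely illustrative values and assuming TarsosDSPAudioFormat mirrors the javax.sound.sampled.AudioFormat constructor, would be:

float samplerate = 44100;
int size = 1024;
TarsosDSPAudioFormat format = new TarsosDSPAudioFormat(samplerate, 16, 1, true, false);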
+ * + * @author Joren Six + */ +public class PitchResyntheziser implements PitchDetectionHandler { + + private double phase = 0; + private double phaseFirst = 0; + private double phaseSecond = 0; + private double prevFrequency = 0; + private final float samplerate; + private final EnvelopeFollower envelopeFollower; + private final boolean usePureSine; + private final boolean followEnvelope; + private final double[] previousFrequencies; + private int previousFrequencyIndex; + + public PitchResyntheziser(float samplerate){ + this(samplerate,true,false); + } + + public PitchResyntheziser(float samplerate,boolean followEnvelope,boolean pureSine){ + this(samplerate,followEnvelope,pureSine,5); + } + + public PitchResyntheziser(float samplerate,boolean followEnvelope,boolean pureSine,int filterSize){ + envelopeFollower = new EnvelopeFollower(samplerate,0.005,0.01); + this.followEnvelope=followEnvelope; + this.usePureSine = pureSine; + this.samplerate = samplerate; + previousFrequencies = new double[filterSize]; + previousFrequencyIndex = 0; + } + + @Override + public void handlePitch(PitchDetectionResult pitchDetectionResult, + AudioEvent audioEvent) { + double frequency = pitchDetectionResult.getPitch(); + + if(frequency==-1){ + frequency=prevFrequency; + }else{ + if(previousFrequencies.length!=0){ + //median filter + //store and adjust pointer + previousFrequencies[previousFrequencyIndex] = frequency; + previousFrequencyIndex++; + previousFrequencyIndex %= previousFrequencies.length; + //sort to get median frequency + double[] frequenciesCopy = previousFrequencies.clone(); + Arrays.sort(frequenciesCopy); + //use the median as frequency + frequency = frequenciesCopy[frequenciesCopy.length/2]; + } + + prevFrequency = frequency; + } + + + + final double twoPiF = 2 * Math.PI * frequency; + float[] audioBuffer = audioEvent.getFloatBuffer(); + float[] envelope = null; + if(followEnvelope){ + envelope = audioBuffer.clone(); + envelopeFollower.calculateEnvelope(envelope); + } + + for (int sample = 0; sample < audioBuffer.length; sample++) { + double time = sample / samplerate; + double wave = Math.sin(twoPiF * time + phase); + if(!usePureSine){ + wave += 0.05 * Math.sin(twoPiF * 4 * time + phaseFirst); + wave += 0.01 * Math.sin(twoPiF * 8 * time + phaseSecond); + } + audioBuffer[sample] = (float) wave; + if(followEnvelope){ + audioBuffer[sample] = audioBuffer[sample] * envelope[sample]; + } + } + + double timefactor = twoPiF * audioBuffer.length / samplerate; + phase = timefactor + phase; + if(!usePureSine){ + phaseFirst = 4 * timefactor + phaseFirst; + phaseSecond = 8 * timefactor + phaseSecond; + } + } +} diff --git a/app/src/main/java/be/tarsos/dsp/synthesis/SineGenerator.java b/app/src/main/java/be/tarsos/dsp/synthesis/SineGenerator.java new file mode 100644 index 0000000..ebc81e6 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/synthesis/SineGenerator.java @@ -0,0 +1,62 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified 
source code by various authors, +* for credits and info, see README. +* +*/ + +package be.tarsos.dsp.synthesis; + +import be.tarsos.dsp.AudioEvent; +import be.tarsos.dsp.AudioProcessor; + +public class SineGenerator implements AudioProcessor{ + + private final double gain; + private final double frequency; + private double phase; + + public SineGenerator(){ + this(1.0,440); + } + + public SineGenerator(double gain,double frequency){ + this.gain = gain; + this.frequency = frequency; + this.phase = 0; + } + + @Override + public boolean process(AudioEvent audioEvent) { + float[] buffer = audioEvent.getFloatBuffer(); + double sampleRate = audioEvent.getSampleRate(); + double twoPiF = 2 * Math.PI * frequency; + double time = 0; + for(int i = 0 ; i < buffer.length ; i++){ + time = i / sampleRate; + buffer[i] += (float) (gain * Math.sin(twoPiF * time + phase)); + } + phase = twoPiF * buffer.length / sampleRate + phase; + return true; + } + + @Override + public void processingFinished() { + } +} diff --git a/app/src/main/java/be/tarsos/dsp/synthesis/package-info.java b/app/src/main/java/be/tarsos/dsp/synthesis/package-info.java new file mode 100644 index 0000000..0c93db1 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/synthesis/package-info.java @@ -0,0 +1,28 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +/** + * Some audio generates, sine waves, noise,.... + */ +package be.tarsos.dsp.synthesis; diff --git a/app/src/main/java/be/tarsos/dsp/util/AudioResourceUtils.java b/app/src/main/java/be/tarsos/dsp/util/AudioResourceUtils.java new file mode 100644 index 0000000..97bd7b9 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/util/AudioResourceUtils.java @@ -0,0 +1,170 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +package be.tarsos.dsp.util; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.net.MalformedURLException; +import java.net.URL; + +/** + * Some utility functions to handle audio resources. + * + * @author Joren Six + */ +public class AudioResourceUtils { + + private AudioResourceUtils() { + } + + /** + * Returns a more practical audio resource name. E.g. 
if + * http://stream.com/stream.pls is given, the PLS-file is parsed and the + * first audio file is returned. It supports PLS, M3U, AXS and XSPF" + * + * @param inputResource + * The input resource, a file, URL, PLS-file or M3U-file. + * + * @return A more practical audio resource name. + */ + public static String sanitizeResource(String inputResource) { + if (inputResource.toLowerCase().endsWith("pls")) { + inputResource = parsePLS(inputResource); + } else if (inputResource.toLowerCase().endsWith("m3u")) { + inputResource = parseM3U(inputResource); + } else if (inputResource.toLowerCase().endsWith("asx")){ + inputResource = parseASX(inputResource); + } else if (inputResource.toLowerCase().endsWith("xspf")){ + inputResource = parseXSPF(inputResource); + } + return inputResource; + } + + private static String parseXSPF(String inputResource){ + String inputFile = ""; + try { + String contents = readTextFromUrl(new URL(inputResource)); + for (String line : contents.split("\n")) { + if (line.toLowerCase().contains("href")) { + String pattern = "(?i)(.*).*"; + inputFile = line.replaceAll(pattern, "$1"); + break; + } + } + } catch (MalformedURLException e) { + e.printStackTrace(); + } + return inputFile; + } + + private static String parseASX(String inputResource) { + String inputFile = ""; + try { + String contents = readTextFromUrl(new URL(inputResource)); + for (String line : contents.split("\n")) { + if (line.toLowerCase().contains("href")) { + String pattern = "(?i).*href=\"(.*)\".*"; + inputFile = line.replaceAll(pattern, "$1"); + break; + } + } + } catch (MalformedURLException e) { + e.printStackTrace(); + } + return inputFile; + } + + /** + * Parses the PLS file and returns the first file name. + * + * @param inputUrl + * The input PLS file. + * @return The first file name in the PLS playlist. + */ + public static String parsePLS(String inputUrl) { + String inputFile = ""; + try { + String plsContents = readTextFromUrl(new URL(inputUrl)); + for (String line : plsContents.split("\n")) { + if (line.startsWith("File1=")) { + inputFile = line.replace("File1=", "").trim(); + break; + } + } + } catch (MalformedURLException e) { + e.printStackTrace(); + } + + return inputFile; + } + + /** + * Parses the M3U file and returns the first file name. + * + * @param inputUrl + * The input M3U file. + * @return The first file name in the M3U play list. + */ + public static String parseM3U(String inputUrl) { + String inputFile = ""; + try { + String plsContents = readTextFromUrl(new URL(inputUrl)); + for (String line : plsContents.split("\n")) { + if (!line.trim().isEmpty() && !line.trim().startsWith("#")) { + inputFile = line.trim(); + break; + } + } + } catch (MalformedURLException e) { + e.printStackTrace(); + } + return inputFile; + } + + /** + * Return the text of the file with the given URL. E.g. if + * http://test.be/text.txt is given the contents of text.txt is returned. + * + * @param url + * The URL. + * @return The contents of the file. 
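A hypothetical call matching the example URL in this javadoc; java.net.URL is already imported in this file, and new URL(...) declares MalformedURLException, so the caller has to handle it:

String text = AudioResourceUtils.readTextFromUrl(new URL("http://test.be/text.txt"));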
+ */ + public static String readTextFromUrl(URL url) { + StringBuffer fubber = new StringBuffer(); + try { + BufferedReader in = new BufferedReader(new InputStreamReader( + url.openStream())); + String inputLine; + while ((inputLine = in.readLine()) != null) { + fubber.append(inputLine).append("\n"); + } + in.close(); + } catch (IOException exception) { + exception.printStackTrace(); + } + return fubber.toString(); + } + +} diff --git a/app/src/main/java/be/tarsos/dsp/util/BiQuadFilter.java b/app/src/main/java/be/tarsos/dsp/util/BiQuadFilter.java new file mode 100644 index 0000000..64c91a1 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/util/BiQuadFilter.java @@ -0,0 +1,104 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +package be.tarsos.dsp.util; + +/** + * Implements a BiQuad filter, + * which can be used for e.g. low pass filtering. + * + * The implementation is a translation of biquad.c from Aubio, Copyright (C) + * 2003-2009 Paul Brossier: piem@aubio.org + * + * @author Joren Six + * @author Paul Brossiers + */ +public class BiQuadFilter { + + private double i1; + private double i2; + private double o1; + private double o2; + private final double a2; + private final double a3; + private final double b1; + private final double b2; + private final double b3; + + /** + * Create a new biquad filter + * @param b1 first + * @param b2 second + * @param b3 third + * @param a2 a first + * @param a3 a second + */ + public BiQuadFilter(double b1, double b2, double b3, double a2, double a3) { + this.a2 = a2; + this.a3 = a3; + this.b1 = b1; + this.b2 = b2; + this.b3 = b3; + this.i1 = 0.; + this.i2 = 0.; + this.o1 = 0.; + this.o2 = 0.; + } + + public void doFiltering(float[] in, float[] tmp){ + double mir; + /* mirroring */ + mir = 2*in[0]; + i1 = mir - in[2]; + i2 = mir - in[1]; + /* apply filtering */ + doBiQuad(in); + /* invert */ + for (int j = 0; j < in.length; j++){ + tmp[in.length-j-1] = in[j]; + } + /* mirror again */ + mir = 2*tmp[0]; + i1 = mir - tmp[2]; + i2 = mir - tmp[1]; + /* apply filtering */ + doBiQuad(tmp); + /* invert back */ + for (int j = 0; j < in.length; j++){ + in[j] = tmp[in.length-j-1]; + } + } + + private void doBiQuad(float[] in) { + for (int j = 0; j < in.length; j++) { + double i0 = in[j]; + double o0 = b1 * i0 + b2 * i1 + b3 * i2 - a2 * o1 - a3 * o2; + in[j] = (float) o0; + i2 = i1; + i1 = i0; + o2 = o1; + o1 = o0; + } + } +} diff --git a/app/src/main/java/be/tarsos/dsp/util/Complex.java b/app/src/main/java/be/tarsos/dsp/util/Complex.java new file mode 100644 index 0000000..052a195 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/util/Complex.java @@ -0,0 +1,239 @@ +package be.tarsos.dsp.util; + + + +/** + * Complex implements a complex number and defines complex +arithmetic and mathematical functions +Last Updated February 27, 2001 +Copyright 1997-2001 +@version 1.0 +@author Andrew G. 
Bennett + * @author joren + * + */ +public class Complex { + +private final double x; + private final double y; + +/** + Constructs the complex number z = u + i*v + @param u Real part + @param v Imaginary part +*/ +public Complex(double u,double v) { + x=u; + y=v; +} + +/** + Real part of this Complex number + (the x-coordinate in rectangular coordinates). + @return Re[z] where z is this Complex number. +*/ +public double real() { + return x; +} + +/** + Imaginary part of this Complex number + (the y-coordinate in rectangular coordinates). + @return Im[z] where z is this Complex number. +*/ +public double imag() { + return y; +} + +/** + Modulus of this Complex number + (the distance from the origin in polar coordinates). + @return |z| where z is this Complex number. +*/ +public double mod() { + if (x!=0 || y!=0) { + return Math.sqrt(x*x+y*y); + } else { + return 0d; + } +} + +/** + Argument of this Complex number + (the angle in radians with the x-axis in polar coordinates). + @return arg(z) where z is this Complex number. +*/ +public double arg() { + return Math.atan2(y,x); +} + +/** + Complex conjugate of this Complex number + (the conjugate of x+i*y is x-i*y). + @return z-bar where z is this Complex number. +*/ +public Complex conj() { + return new Complex(x,-y); +} + +/** + Addition of Complex numbers (doesn't change this Complex number). +
(x+i*y) + (s+i*t) = (x+s)+i*(y+t). + @param w is the number to add. + @return z+w where z is this Complex number. +*/ +public Complex plus(Complex w) { + return new Complex(x+w.real(),y+w.imag()); +} + +/** + Subtraction of Complex numbers (doesn't change this Complex number). +
(x+i*y) - (s+i*t) = (x-s)+i*(y-t). + @param w is the number to subtract. + @return z-w where z is this Complex number. +*/ +public Complex minus(Complex w) { + return new Complex(x-w.real(),y-w.imag()); +} + +/** + Complex multiplication (doesn't change this Complex number). + @param w is the number to multiply by. + @return z*w where z is this Complex number. +*/ +public Complex times(Complex w) { + return new Complex(x*w.real()-y*w.imag(),x*w.imag()+y*w.real()); +} + +/** + Division of Complex numbers (doesn't change this Complex number). +
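As a small, hand-verified check of the multiplication defined just above: (1 + 2i)(3 - i) = 5 + 5i.

Complex z = new Complex(1, 2).times(new Complex(3, -1));
// z.real() == 5.0 and z.imag() == 5.0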
(x+i*y)/(s+i*t) = ((x*s+y*t) + i*(y*s-y*t)) / (s^2+t^2) + @param w is the number to divide by + @return new Complex number z/w where z is this Complex number +*/ +public Complex div(Complex w) { + double den=Math.pow(w.mod(),2); + return new Complex((x*w.real()+y*w.imag())/den,(y*w.real()-x*w.imag())/den); +} + +/** + Complex exponential (doesn't change this Complex number). + @return exp(z) where z is this Complex number. +*/ +public Complex exp() { + return new Complex(Math.exp(x)*Math.cos(y),Math.exp(x)*Math.sin(y)); +} + +/** + Principal branch of the Complex logarithm of this Complex number. + (doesn't change this Complex number). + The principal branch is the branch with -pi < arg <= pi. + @return log(z) where z is this Complex number. +*/ +public Complex log() { + return new Complex(Math.log(this.mod()),this.arg()); +} + +/** + Complex square root (doesn't change this complex number). + Computes the principal branch of the square root, which + is the value with 0 <= arg < pi. + @return sqrt(z) where z is this Complex number. +*/ +public Complex sqrt() { + double r=Math.sqrt(this.mod()); + double theta=this.arg()/2; + return new Complex(r*Math.cos(theta),r*Math.sin(theta)); +} + +// Real cosh function (used to compute complex trig functions) +private double cosh(double theta) { + return (Math.exp(theta)+Math.exp(-theta))/2; +} + +// Real sinh function (used to compute complex trig functions) +private double sinh(double theta) { + return (Math.exp(theta)-Math.exp(-theta))/2; +} + +/** + Sine of this Complex number (doesn't change this Complex number). +
sin(z) = (exp(i*z)-exp(-i*z))/(2*i). + @return sin(z) where z is this Complex number. +*/ +public Complex sin() { + return new Complex(cosh(y)*Math.sin(x),sinh(y)*Math.cos(x)); +} + +/** + Cosine of this Complex number (doesn't change this Complex number). +
cos(z) = (exp(i*z)+exp(-i*z))/ 2. + @return cos(z) where z is this Complex number. +*/ +public Complex cos() { + return new Complex(cosh(y)*Math.cos(x),-sinh(y)*Math.sin(x)); +} + +/** + Hyperbolic sine of this Complex number + (doesn't change this Complex number). +
sinh(z) = (exp(z)-exp(-z))/2. + @return sinh(z) where z is this Complex number. +*/ +public Complex sinh() { + return new Complex(sinh(x)*Math.cos(y),cosh(x)*Math.sin(y)); +} + +/** + Hyperbolic cosine of this Complex number + (doesn't change this Complex number). +
cosh(z) = (exp(z) + exp(-z)) / 2. + @return cosh(z) where z is this Complex number. +*/ +public Complex cosh() { + return new Complex(cosh(x)*Math.cos(y),sinh(x)*Math.sin(y)); +} + +/** + Tangent of this Complex number (doesn't change this Complex number). +
tan(z) = sin(z)/cos(z). + @return tan(z) where z is this Complex number. +*/ +public Complex tan() { + return (this.sin()).div(this.cos()); +} + +/** + Negative of this complex number (chs stands for change sign). + This produces a new Complex number and doesn't change + this Complex number. +
-(x+i*y) = -x-i*y. + @return -z where z is this Complex number. +*/ +public Complex chs() { + return new Complex(-x,-y); +} + +/** + String representation of this Complex number. + @return x+i*y, x-i*y, x, or i*y as appropriate. +*/ +public String toString() { + if (x!=0 && y>0) { + return x+" + "+y+"i"; + } + if (x!=0 && y<0) { + return x+" - "+(-y)+"i"; + } + if (y==0) { + return String.valueOf(x); + } + if (x==0) { + return y+"i"; + } + // shouldn't get here (unless Inf or NaN) + return x+" + i*"+y; + +} +} + diff --git a/app/src/main/java/be/tarsos/dsp/util/ConcurrencyUtils.java b/app/src/main/java/be/tarsos/dsp/util/ConcurrencyUtils.java new file mode 100644 index 0000000..ac3153c --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/util/ConcurrencyUtils.java @@ -0,0 +1,331 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +/* ***** BEGIN LICENSE BLOCK ***** + * Version: MPL 1.1/GPL 2.0/LGPL 2.1 + * + * The contents of this file are subject to the Mozilla Public License Version + * 1.1 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS IS" basis, + * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License + * for the specific language governing rights and limitations under the + * License. + * + * The Original Code is Parallel Colt. + * + * The Initial Developer of the Original Code is + * Piotr Wendykier, Emory University. + * Portions created by the Initial Developer are Copyright (C) 2007-2009 + * the Initial Developer. All Rights Reserved. + * + * Alternatively, the contents of this file may be used under the terms of + * either the GNU General Public License Version 2 or later (the "GPL"), or + * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), + * in which case the provisions of the GPL or the LGPL are applicable instead + * of those above. If you wish to allow use of your version of this file only + * under the terms of either the GPL or the LGPL, and not to allow others to + * use your version of this file under the terms of the MPL, indicate your + * decision by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL or the LGPL. If you do not delete + * the provisions above, a recipient may use your version of this file under + * the terms of any one of the MPL, the GPL or the LGPL. + * + * ***** END LICENSE BLOCK ***** */ +package be.tarsos.dsp.util; + +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ThreadFactory; + +/** + * Concurrency utilities. 
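A minimal sketch of how the helpers in this class are typically combined; the per-task work is left abstract, and java.util.concurrent.Future is already imported by this file:

int n = ConcurrencyUtils.getNumberOfThreads();
Future[] futures = new Future[n];
for (int i = 0; i < n; i++) {
    final int slice = i;
    futures[i] = ConcurrencyUtils.submit(new Runnable() {
        public void run() {
            // process slice number 'slice' of the data here
        }
    });
}
ConcurrencyUtils.waitForCompletion(futures);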
+ * + * @author Piotr Wendykier (piotr.wendykier@gmail.com) + */ +public class ConcurrencyUtils { + /** + * Thread pool. + */ + private static final ExecutorService THREAD_POOL = Executors.newCachedThreadPool(new CustomThreadFactory(new CustomExceptionHandler())); + + private static int THREADS_BEGIN_N_1D_FFT_2THREADS = 8192; + + private static int THREADS_BEGIN_N_1D_FFT_4THREADS = 65536; + + private static int THREADS_BEGIN_N_2D = 65536; + + private static int THREADS_BEGIN_N_3D = 65536; + + private static int NTHREADS = prevPow2(getNumberOfProcessors()); + + private ConcurrencyUtils() { + + } + + private static class CustomExceptionHandler implements Thread.UncaughtExceptionHandler { + public void uncaughtException(Thread t, Throwable e) { + e.printStackTrace(); + } + + } + + private static class CustomThreadFactory implements ThreadFactory { + private static final ThreadFactory defaultFactory = Executors.defaultThreadFactory(); + + private final Thread.UncaughtExceptionHandler handler; + + CustomThreadFactory(Thread.UncaughtExceptionHandler handler) { + this.handler = handler; + } + + public Thread newThread(Runnable r) { + Thread t = defaultFactory.newThread(r); + t.setUncaughtExceptionHandler(handler); + return t; + } + } + + /** + * Returns the number of available processors. + * + * @return number of available processors + */ + public static int getNumberOfProcessors() { + return Runtime.getRuntime().availableProcessors(); + } + + /** + * Returns the current number of threads. + * + * @return the current number of threads. + */ + public static int getNumberOfThreads() { + return NTHREADS; + } + + /** + * Sets the number of threads. If n is not a power-of-two number, then the + * number of threads is set to the closest power-of-two number less than n. + * + * @param n The number of threads + */ + public static void setNumberOfThreads(int n) { + NTHREADS = prevPow2(n); + } + + /** + * Returns the minimal size of 1D data for which two threads are used. + * + * @return the minimal size of 1D data for which two threads are used + */ + public static int getThreadsBeginN_1D_FFT_2Threads() { + return THREADS_BEGIN_N_1D_FFT_2THREADS; + } + + /** + * Returns the minimal size of 1D data for which four threads are used. + * + * @return the minimal size of 1D data for which four threads are used + */ + public static int getThreadsBeginN_1D_FFT_4Threads() { + return THREADS_BEGIN_N_1D_FFT_4THREADS; + } + + /** + * Returns the minimal size of 2D data for which threads are used. + * + * @return the minimal size of 2D data for which threads are used + */ + public static int getThreadsBeginN_2D() { + return THREADS_BEGIN_N_2D; + } + + /** + * Returns the minimal size of 3D data for which threads are used. + * + * @return the minimal size of 3D data for which threads are used + */ + public static int getThreadsBeginN_3D() { + return THREADS_BEGIN_N_3D; + } + + /** + * Sets the minimal size of 1D data for which two threads are used. + * + * @param n + * the minimal size of 1D data for which two threads are used + */ + public static void setThreadsBeginN_1D_FFT_2Threads(int n) { + if (n < 512) { + THREADS_BEGIN_N_1D_FFT_2THREADS = 512; + } else { + THREADS_BEGIN_N_1D_FFT_2THREADS = n; + } + } + + /** + * Sets the minimal size of 1D data for which four threads are used. 
+ * + * @param n + * the minimal size of 1D data for which four threads are used + */ + public static void setThreadsBeginN_1D_FFT_4Threads(int n) { + if (n < 512) { + THREADS_BEGIN_N_1D_FFT_4THREADS = 512; + } else { + THREADS_BEGIN_N_1D_FFT_4THREADS = n; + } + } + + /** + * Sets the minimal size of 2D data for which threads are used. + * + * @param n + * the minimal size of 2D data for which threads are used + */ + public static void setThreadsBeginN_2D(int n) { + THREADS_BEGIN_N_2D = n; + } + + /** + * Sets the minimal size of 3D data for which threads are used. + * + * @param n + * the minimal size of 3D data for which threads are used + */ + public static void setThreadsBeginN_3D(int n) { + THREADS_BEGIN_N_3D = n; + } + + /** + * Resets the minimal size of 1D data for which two and four threads are + * used. + */ + public static void resetThreadsBeginN_FFT() { + THREADS_BEGIN_N_1D_FFT_2THREADS = 8192; + THREADS_BEGIN_N_1D_FFT_4THREADS = 65536; + } + + /** + * Resets the minimal size of 2D and 3D data for which threads are used. + */ + public static void resetThreadsBeginN() { + THREADS_BEGIN_N_2D = 65536; + THREADS_BEGIN_N_3D = 65536; + } + + /** + * Returns the closest power-of-two number greater than or equal to x. + * + * @param x the number to process + * @return the closest power-of-two number greater than or equal to x + */ + public static int nextPow2(int x) { + if (x < 1) + throw new IllegalArgumentException("x must be greater or equal 1"); + if ((x & (x - 1)) == 0) { + return x; // x is already a power-of-two number + } + x |= (x >>> 1); + x |= (x >>> 2); + x |= (x >>> 4); + x |= (x >>> 8); + x |= (x >>> 16); + x |= (x >>> 32); + return x + 1; + } + + /** + * Returns the closest power-of-two number less than or equal to x. + * + * @param x the number to process + * @return the closest power-of-two number less then or equal to x + */ + public static int prevPow2(int x) { + if (x < 1) + throw new IllegalArgumentException("x must be greater or equal 1"); + return (int) Math.pow(2, Math.floor(Math.log(x) / Math.log(2))); + } + + /** + * Checks if x is a power-of-two number. + * + * @param x the number to process + * @return true if x is a power-of-two number + */ + public static boolean isPowerOf2(int x) { + if (x <= 0) + return false; + else + return (x & (x - 1)) == 0; + } + + /** + * Causes the currently executing thread to sleep (temporarily cease + * execution) for the specified number of milliseconds. + * + * @param millis the number to millis to sleep + */ + public static void sleep(long millis) { + try { + Thread.sleep(millis); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + + /** + * Submits a Runnable task for execution and returns a Future representing + * that task. + * + * @param task a Runnable task for execution + * @return a Future representing the task + */ + public static Future submit(Runnable task) { + return THREAD_POOL.submit(task); + } + + /** + * Waits for all threads to complete computation. + * + * @param futures The futures which need completion. 
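Hand-checked values for the power-of-two helpers defined above:

ConcurrencyUtils.nextPow2(1000);    // 1024
ConcurrencyUtils.prevPow2(1000);    // 512
ConcurrencyUtils.isPowerOf2(1024);  // true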
+ */ + public static void waitForCompletion(Future[] futures) { + int size = futures.length; + try { + for (int j = 0; j < size; j++) { + futures[j].get(); + } + } catch (ExecutionException ex) { + ex.printStackTrace(); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } +} diff --git a/app/src/main/java/be/tarsos/dsp/util/CubicSplineFast.java b/app/src/main/java/be/tarsos/dsp/util/CubicSplineFast.java new file mode 100644 index 0000000..38540e7 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/util/CubicSplineFast.java @@ -0,0 +1,187 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +/********************************************************** +* +* Class CubicSplineFast +* +* Class for performing an interpolation using a cubic spline +* setTabulatedArrays and interpolate adapted, with modification to +* an object-oriented approach, from Numerical Recipes in C (http://www.nr.com/) +* Stripped down version of CubicSpline - all data checks have been removed for faster running +* +* +* WRITTEN BY: Dr Michael Thomas Flanagan +* +* DATE: 26 December 2009 (Stripped down version of CubicSpline: May 2002 - 31 October 2009) +* UPDATE: 14 January 2010 +* +* DOCUMENTATION: +* See Michael Thomas Flanagan's Java library on-LineWavelet web page: +* http://www.ee.ucl.ac.uk/~mflanaga/java/CubicSplineFast.html +* http://www.ee.ucl.ac.uk/~mflanaga/java/ +* +* Copyright (c) 2002 - 2010 Michael Thomas Flanagan +* +* PERMISSION TO COPY: +* +* Permission to use, copy and modify this software and its documentation for NON-COMMERCIAL purposes is granted, without fee, +* provided that an acknowledgement to the author, Dr Michael Thomas Flanagan at www.ee.ucl.ac.uk/~mflanaga, appears in all copies +* and associated documentation or publications. +* +* Redistributions of the source code of this source code, or parts of the source codes, must retain the above copyright notice, +* this list of conditions and the following disclaimer and requires written permission from the Michael Thomas Flanagan: +* +* Redistribution in binary form of all or parts of this class must reproduce the above copyright notice, this list of conditions and +* the following disclaimer in the documentation and/or other materials provided with the distribution and requires written permission +* from the Michael Thomas Flanagan: +* +* Dr Michael Thomas Flanagan makes no representations about the suitability or fitness of the software for any or for a particular purpose. +* Dr Michael Thomas Flanagan shall not be liable for any damages suffered as a result of using, modifying or distributing this software +* or its derivatives. 
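For the spline class that follows, a minimal usage sketch; the values are illustrative and the constructor is assumed to leave the object ready for interpolation:

double[] xs = {0.0, 1.0, 2.0, 3.0, 4.0};
double[] ys = {0.0, 1.0, 4.0, 9.0, 16.0};
CubicSplineFast spline = new CubicSplineFast(xs, ys);
double y = spline.interpolate(2.5);   // close to 2.5 * 2.5 = 6.25 for this smooth data set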
+* +***************************************************************************************/ + + +package be.tarsos.dsp.util; + +/** + * Class for performing an interpolation using a cubic spline + * @author Dr Michael Thomas Flanagan + */ +public class CubicSplineFast{ + + private int nPoints = 0; // no. of tabulated points + private double[] y = null; // y=f(x) tabulated function + private double[] x = null; // x in tabulated function f(x) + private double[] d2ydx2 = null; // second derivatives of y + + // Constructors + // Constructor with data arrays initialised to arrays x and y + public CubicSplineFast(double[] x, double[] y){ + this.nPoints=x.length; + this.x = new double[nPoints]; + this.y = new double[nPoints]; + this.d2ydx2 = new double[nPoints]; + for(int i=0; i=0;k--){ + this.d2ydx2[k]=this.d2ydx2[k]*this.d2ydx2[k+1]+u[k]; + } + } + + // INTERPOLATE + // Returns an interpolated value of y for a value of x from a tabulated function y=f(x) + // after the data has been entered via a constructor. + // The derivatives are calculated, bt calcDeriv(), on the first call to this method ands are + // then stored for use on all subsequent calls + public double interpolate(double xx){ + + double h=0.0D,b=0.0D,a=0.0D, yy=0.0D; + int k=0; + int klo=0; + int khi=this.nPoints-1; + while (khi-klo > 1){ + k=(khi+klo) >> 1; + if(this.x[k] > xx){ + khi=k; + } + else{ + klo=k; + } + } + h=this.x[khi]-this.x[klo]; + + if (h == 0.0){ + throw new IllegalArgumentException("Two values of x are identical: point "+klo+ " ("+this.x[klo]+") and point "+khi+ " ("+this.x[khi]+")" ); + } + else{ + a=(this.x[khi]-xx)/h; + b=(xx-this.x[klo])/h; + yy=a*this.y[klo]+b*this.y[khi]+((a*a*a-a)*this.d2ydx2[klo]+(b*b*b-b)*this.d2ydx2[khi])*(h*h)/6.0; + } + return yy; + } +} diff --git a/app/src/main/java/be/tarsos/dsp/util/FFMPEGDownloader.java b/app/src/main/java/be/tarsos/dsp/util/FFMPEGDownloader.java new file mode 100644 index 0000000..6f44024 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/util/FFMPEGDownloader.java @@ -0,0 +1,125 @@ +package be.tarsos.dsp.util; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.net.MalformedURLException; +import java.net.URL; +import java.nio.channels.Channels; +import java.nio.channels.ReadableByteChannel; +import java.util.logging.Logger; + +/** + * Downloads a static ffmpeg binary for several platforms: + * Windows x64 and x32 + * Max OS X x64 + * Linux x32 and x64 + * The code tries to determine the correct platform and downloads it to + * the temporary directory System.getProperty("java.io.tmpdir"). + * + * After downloading it makes the binary executable. 
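A hypothetical caller of the downloader described here:

FFMPEGDownloader downloader = new FFMPEGDownloader();
String ffmpeg = downloader.ffmpegBinary();   // null when the platform is unknown or the download failed
if (ffmpeg != null) {
    // hand the binary path to whatever spawns the ffmpeg process
}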
+ * The location of the downloaded binary is returned by ffmpegBinary(); + * + * @author Joren Six + */ +public class FFMPEGDownloader { + + private static String url = "https://0110.be/releases/TarsosDSP/TarsosDSP-static-ffmpeg/"; + + private final String ffmpegBinary; + + private final static Logger LOG = Logger.getLogger(FFMPEGDownloader.class.getName()); + + /** + * A new FFMPEGDownloader + */ + public FFMPEGDownloader(){ + String filename = operatingSystemName() + "_" + processorArchitecture() + "_ffmpeg" + suffix(); + url = url + filename; + + String tempDirectory = System.getProperty("java.io.tmpdir"); + String saveTo = new File(tempDirectory,filename).getAbsolutePath(); + + if(new File(saveTo).exists() && new File(saveTo).length() > 1000){ + LOG.info("Found an already download ffmpeg static binary: " + saveTo); + ffmpegBinary = saveTo; + }else{ + LOG.info("Started downloading an ffmpeg static binary from " + url + " to " + saveTo ); + downloadExecutable(saveTo); + + if(new File(saveTo).exists() && new File(saveTo).length() > 1000){ + LOG.info("Downloaded an ffmpeg static binary. Stored at: " + saveTo); + //make it executable + new File(saveTo).setExecutable(true); + ffmpegBinary = saveTo; + }else{ + //Unable to download or unknown architecture + LOG.warning("Unable to find or download an ffmpeg static binary. " + filename); + ffmpegBinary = null; + } + } + } + + /** + * The path of the downloaded ffmpeg binary or null + * @return The path of the downloaded ffmpeg binary or null + */ + public String ffmpegBinary(){ + if(ffmpegBinary!=null){ + return ffmpegBinary.replace(suffix(), ""); + } + return null; + } + + private void downloadExecutable(String saveTo){ + try{ + URL website = new URL(url); + ReadableByteChannel rbc = Channels.newChannel(website.openStream()); + FileOutputStream fos = new FileOutputStream(saveTo); + fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE); + fos.close(); + }catch(MalformedURLException e){ + e.printStackTrace(); + } catch (IOException e) { + + e.printStackTrace(); + } + } + + private String operatingSystemName(){ + String name; + String operatingSystem = System.getProperty("os.name").toLowerCase(); + if(operatingSystem.indexOf("indows") > 0 ){ + name = "windows"; + }else if(operatingSystem.indexOf("nux") >= 0){ + name="linux"; + }else if(operatingSystem.indexOf("mac") >= 0){ + name="mac_os_x"; + }else{ + name = null; + } + return name; + } + + private String processorArchitecture(){ + boolean is64bit = false; + if (System.getProperty("os.name").contains("Windows")) { + is64bit = (System.getenv("ProgramFiles(x86)") != null); + } else { + is64bit = (System.getProperty("os.arch").indexOf("64") != -1); + } + if(is64bit){ + return "64_bits"; + }else{ + return "32_bits"; + } + } + + private String suffix(){ + String suffix = ""; + if (System.getProperty("os.name").contains("Windows")) { + suffix = ".exe"; + } + return suffix; + } +} diff --git a/app/src/main/java/be/tarsos/dsp/util/PeakPicker.java b/app/src/main/java/be/tarsos/dsp/util/PeakPicker.java new file mode 100644 index 0000000..37dbdf9 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/util/PeakPicker.java @@ -0,0 +1,167 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, 
University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +package be.tarsos.dsp.util; + +import java.util.Arrays; + +/** + * Implements a moving mean adaptive threshold peak picker. + * + * The implementation is a translation of peakpicker.c from Aubio, Copyright (C) + * 2003-2009 Paul Brossier piem@aubio.org + * + * @author Joren Six + * @author Paul Brossiers + */ +public class PeakPicker { + /** thresh: offset threshold [0.033 or 0.01] */ + private double threshold; + /** win_post: median filter window length (causal part) [8] */ + private final int win_post; + /** pre: median filter window (anti-causal part) [post-1] */ + private final int win_pre; + + /** biquad low pass filter */ + private final BiQuadFilter biquad; + + /** original onsets */ + private final float[] onset_keep; + /** modified onsets */ + private final float[] onset_proc; + /** peak picked window [3] */ + private final float[] onset_peek; + /** scratch pad for biquad and median */ + private final float[] scratch; + + private float lastPeekValue; + + /** + * Initializes a new moving mean adaptive threshold peak picker. + * + * @param threshold + * The threshold defines when a peak is selected. It should be + * between zero and one, 0.3 is a reasonable value. If too many + * peaks are detected go to 0.5 - 0.8. + */ + public PeakPicker(double threshold) { + /* Low-pass filter cutoff [0.34, 1] */ + biquad = new BiQuadFilter(0.1600,0.3200,0.1600,-0.5949,0.2348); + this.threshold = threshold; + win_post = 5; + win_pre = 1; + + onset_keep = new float[win_post + win_pre +1]; + onset_proc = new float[win_post + win_pre +1]; + scratch = new float[win_post + win_pre +1]; + onset_peek = new float[3]; + } + + /** + * Sets a new threshold. + * + * @param threshold + * The threshold defines when a peak is selected. It should be + * between zero and one, 0.3 is a reasonable value. If too many + * peaks are detected go to 0.5 - 0.8. + */ + public void setThreshold(double threshold) { + this.threshold = threshold; + } + + /** + * Modified version for real time, moving mean adaptive threshold this + * method is slightly more permissive than the off-LineWavelet one, and yields to + * an increase of false positives. + * + * @param onset + * The new onset value. + * @return True if a peak is detected, false otherwise. 
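+	 *
+	 * For illustration, a typical (hypothetical) caller feeds one onset value per
+	 * analysis frame and reacts when a peak is reported; onsetValues below is an
+	 * assumed array of per-frame onset values, not part of this class:
+	 *
+	 *   PeakPicker picker = new PeakPicker(0.3);
+	 *   for (float onsetValue : onsetValues) {
+	 *       if (picker.pickPeak(onsetValue)) {
+	 *           // a peak was detected for this frame
+	 *       }
+	 *   }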
+ **/ + public boolean pickPeak(float onset) { + float mean = 0.f; + float median = 0.f; + + int length = win_post + win_pre + 1; + + + /* store onset in onset_keep */ + /* shift all elements but last, then write last */ + /* for (i=0;i onset_peek[index - 1] && + onset_peek[index] > onset_peek[index + 1] && + onset_peek[index] > 0.); + } +} diff --git a/app/src/main/java/be/tarsos/dsp/util/PitchConverter.java b/app/src/main/java/be/tarsos/dsp/util/PitchConverter.java new file mode 100644 index 0000000..c08a920 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/util/PitchConverter.java @@ -0,0 +1,217 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +package be.tarsos.dsp.util; + + +/** + * Converts pitch from one unit to another (and back (and back (and back ...))). + * + * @author Joren Six + */ +public final class PitchConverter { + + /** + * Hide the default constructor. + */ + private PitchConverter() { + } + + /** + * C-1 = 16.35 / 2 Hz. + */ + private static final double REF_FREQ = 8.17579892; + + /** + * Cache LOG 2 calculation. + */ + private static final double LOG_TWO = Math.log(2.0); + + /** + * A MIDI key is an integer between 0 and 127, inclusive. Within a certain + * range every pitch is mapped to a MIDI key. If a value outside the range + * is given an IllegalArugmentException is thrown. + * + * @param hertzValue + * The pitch in Hertz. + * @return An integer representing the closest midi key. + * @exception IllegalArgumentException + * if the hertzValue does not fall within the range of valid + * MIDI key frequencies. + */ + public static int hertzToMidiKey(final Double hertzValue) { + final int midiKey = (int) Math.round(hertzToMidiCent(hertzValue)); + if (midiKey < 0 || midiKey > 127) { + // TODO + // LOG.warning("MIDI is only defined between [" + midiKeyToHertz(0) + // + "," + // + midiKeyToHertz(127) + "] " + hertzValue + + // "does not map to a MIDI key."); + } + return midiKey; + } + + /** + * Calculates the frequency (Hz) for a MIDI key. + * + * @param midiKey + * The MIDI key. A MIDI key is an integer between 0 and 127, + * inclusive. + * @return A frequency in Hz corresponding to the MIDI key. + * @exception IllegalArgumentException + * If midiKey is not in the valid range between 0 and 127, + * inclusive. + */ + public static double midiKeyToHertz(final int midiKey) { + if (midiKey < 0 || midiKey > 127) { + throw new IllegalArgumentException("MIDI keys are values from 0 to 127, inclusive " + midiKey + + " is invalid."); + } + return midiCentToHertz(midiKey); + } + + /** + * Converts a Hertz value to relative cents. E.g. 440Hz is converted to 900 + * if the reference is a C. + * + * @param hertzValue + * A value in hertz. + * @return A value in relative cents. 
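+	 *         For example, with the C-based reference used here, 110 Hz (A2),
+	 *         220 Hz (A3) and 440 Hz (A4) all map to 900 relative cents: the
+	 *         modulo folds every frequency into a single octave.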
+ */ + public static double hertzToRelativeCent(final double hertzValue) { + double absoluteCentValue = hertzToAbsoluteCent(hertzValue); + // make absoluteCentValue positive. E.g. -2410 => 1210 + if (absoluteCentValue < 0) { + absoluteCentValue = Math.abs(1200 + absoluteCentValue); + } + // so it can be folded to one octave. E.g. 1210 => 10 + return absoluteCentValue % 1200.0; + } + + /** + * This method is not really practical. Maybe I will need it someday. + * + * @param relativeCent + * @return public static double relativeCentToHertz(double relativeCent){ if + * (relativeCent < 0 || relativeCent >= 1200) throw new + * IllegalArgumentException + * ("Relative cent values are values from 0 to 1199, inclusive " + + * relativeCent + " is invalid."); int defaultOctave = 5; int offset + * = defaultOctave * 1200; return absoluteCentToHertz(relativeCent + + * offset); } + */ + + /** + * The reference frequency is configured. The default reference frequency is + * 16.35Hz. This is C0 on a piano keyboard with A4 tuned to 440 Hz. This + * means that 0 cents is C0; 1200 is C1; 2400 is C2; ... also -1200 cents is + * C-1 + * + * @param hertzValue + * The pitch in Hertz. + * @return The value in absolute cents using the configured reference + * frequency + */ + public static double hertzToAbsoluteCent(final double hertzValue) { + double pitchInAbsCent = 0.0; + if (hertzValue > 0) { + pitchInAbsCent = 1200 * Math.log(hertzValue / REF_FREQ) / LOG_TWO; + } else { + throw new IllegalArgumentException("Pitch in Hz schould be greater than zero, is " + hertzValue); + } + return pitchInAbsCent; + } + + /** + * Returns the frequency (Hz) of an absolute cent value. This calculation + * uses a configured reference frequency. + * + * @param absoluteCent + * The pitch in absolute cent. + * @return A pitch in Hz. + */ + public static double absoluteCentToHertz(final double absoluteCent) { + return REF_FREQ * Math.pow(2, absoluteCent / 1200.0); + } + + /** + * Converts a frequency in Hz to a MIDI CENT value using + * (12 * log2 (f / 440)) + 69
+ * E.g.
+     * 69.168 MIDI CENTS = MIDI NOTE 69 + 16.8 cents
+ * 69.168 MIDI CENTS = 440Hz + x Hz + * + * @param hertzValue + * The pitch in Hertz. + * @return The pitch in MIDI cent. + */ + public static double hertzToMidiCent(final double hertzValue) { + double pitchInMidiCent = 0.0; + if (hertzValue != 0) { + pitchInMidiCent = 12 * Math.log(hertzValue / 440) / LOG_TWO + 69; + } + return pitchInMidiCent; + } + + /** + * Converts a MIDI CENT frequency to a frequency in Hz. + * + * @param midiCent + * The pitch in MIDI CENT. + * @return The pitch in Hertz. + */ + public static double midiCentToHertz(final double midiCent) { + return 440 * Math.pow(2, (midiCent - 69) / 12d); + } + + /** + * Converts cent values to ratios. See + * "Ratios Make Cents: Conversions from ratios to cents and back again" in + * the book "Tuning Timbre Spectrum Scale" William A. Sethares. + * + * @param cent + * A cent value + * @return A ratio containing the same information. + */ + public static double centToRatio(final double cent) { + final double ratio; + ratio = Math.pow(10, Math.log10(2) * cent / 1200.0); + return ratio; + } + + /** + * Converts a ratio to cents. + * "Ratios Make Cents: Conversions from ratios to cents and back again" in + * the book "Tuning Timbre Spectrum Scale" William A. Sethares + * + * @param ratio + * A cent value + * @return A ratio containing the same information. + */ + public static double ratioToCent(final double ratio) { + final double cent; + cent = 1200 / Math.log10(2) * Math.log10(ratio); + return cent; + } +} diff --git a/app/src/main/java/be/tarsos/dsp/util/fft/BartlettHannWindow.java b/app/src/main/java/be/tarsos/dsp/util/fft/BartlettHannWindow.java new file mode 100644 index 0000000..37c6204 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/util/fft/BartlettHannWindow.java @@ -0,0 +1,63 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +/* + * Copyright (c) 2007 - 2008 by Damien Di Fede + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Library General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +package be.tarsos.dsp.util.fft; + +/** + * A Bartlett-Hann window function. 
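+ *
+ * The generated curve follows
+ * w(i) = 0.62 - 0.48 * |i / (N - 1) - 0.5| - 0.38 * cos(2 * PI * i / (N - 1))
+ * for 0 <= i < N, with N the window length, as implemented in value() below.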
+ * + * @author Damien Di Fede + * @author Corban Brook + * @see The Bartlett-Hann Window + */ +public class BartlettHannWindow extends WindowFunction +{ + /** Constructs a Bartlett-Hann window. */ + public BartlettHannWindow() + { + } + + protected float value(int length, int index) + { + return (float) (0.62 - 0.48 * Math.abs(index / (length - 1) - 0.5) - 0.38 * Math.cos(TWO_PI * index / (length - 1))); + } +} + diff --git a/app/src/main/java/be/tarsos/dsp/util/fft/BartlettWindow.java b/app/src/main/java/be/tarsos/dsp/util/fft/BartlettWindow.java new file mode 100644 index 0000000..26458d6 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/util/fft/BartlettWindow.java @@ -0,0 +1,63 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +/* + * Copyright (c) 2007 - 2008 by Damien Di Fede + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Library General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +package be.tarsos.dsp.util.fft; + +/** + * A Bartlett window function. + * + * @author Damien Di Fede + * @author Corban Brook + * @see The Bartlett Window + */ +public class BartlettWindow extends WindowFunction +{ + /** Constructs a Bartlett window. 
*/ + public BartlettWindow() + { + } + + protected float value(int length, int index) + { + return 2f / (length - 1) * ((length - 1) / 2f - Math.abs(index - (length - 1) / 2f)); + } +} + diff --git a/app/src/main/java/be/tarsos/dsp/util/fft/BlackmanHarrisNuttall.java b/app/src/main/java/be/tarsos/dsp/util/fft/BlackmanHarrisNuttall.java new file mode 100644 index 0000000..883fb75 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/util/fft/BlackmanHarrisNuttall.java @@ -0,0 +1,26 @@ +package be.tarsos.dsp.util.fft; + +/** + * @author joren + * See https://mgasior.web.cern.ch/mgasior/pap/FFT_resol_note.pdf + */ +public class BlackmanHarrisNuttall extends WindowFunction { + float c0 = 0.355768f; + float c1 = 0.487396f; + float c2 = 0.144232f; + float c3 = 0.012604f; + + @Override + protected float value(int length, int index) { + + float sum = 0; + + sum += c0 * Math.cos((TWO_PI * 0 * index ) / (float) (length)) ; + sum += c1 * Math.cos((TWO_PI * 1 * index ) / (float) (length)); + sum += c2 * Math.cos((TWO_PI * 2 * index ) / (float) (length)); + sum += c3 * Math.cos((TWO_PI * 3 * index ) / (float) (length)); + + return sum; + } + +} diff --git a/app/src/main/java/be/tarsos/dsp/util/fft/BlackmanWindow.java b/app/src/main/java/be/tarsos/dsp/util/fft/BlackmanWindow.java new file mode 100644 index 0000000..6244fec --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/util/fft/BlackmanWindow.java @@ -0,0 +1,77 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +/* +* Copyright (c) 2007 - 2008 by Damien Di Fede +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU Library General Public License as published +* by the Free Software Foundation; either version 2 of the License, or +* (at your option) any later version. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU Library General Public License for more details. +* +* You should have received a copy of the GNU Library General Public +* License along with this program; if not, write to the Free Software +* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +*/ + +package be.tarsos.dsp.util.fft; + +/** + * A Blackman window function. + * + * @author Damien Di Fede + * @author Corban Brook + * @see The Blackman Window + */ +public class BlackmanWindow extends WindowFunction { + + private final float alpha; + + /** + * Constructs a Blackman window. 
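+	 * The resulting curve is
+	 * w(i) = a0 - a1 * cos(2 * PI * i / (N - 1)) + a2 * cos(4 * PI * i / (N - 1))
+	 * with a0 = (1 - alpha) / 2, a1 = 1 / 2 and a2 = alpha / 2, as implemented in
+	 * value() below.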
+ * + * @param alpha The Blackman alpha parameter + */ + public BlackmanWindow(float alpha) { + this.alpha = alpha; + } + + /** Constructs a Blackman window with a default alpha value of 0.16 */ + public BlackmanWindow() { + this(0.16f); + } + + protected float value(int length, int index){ + float a0 = (1 - this.alpha) / 2f; + float a1 = 0.5f; + float a2 = this.alpha / 2f; + + return a0 - a1 * (float) Math.cos(TWO_PI * index / (length - 1)) + a2 * (float) Math.cos(4 * Math.PI * index / (length - 1)); + } +} + diff --git a/app/src/main/java/be/tarsos/dsp/util/fft/CosineWindow.java b/app/src/main/java/be/tarsos/dsp/util/fft/CosineWindow.java new file mode 100644 index 0000000..4f5f394 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/util/fft/CosineWindow.java @@ -0,0 +1,60 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +/* + * Copyright (c) 2007 - 2008 by Damien Di Fede + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Library General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +package be.tarsos.dsp.util.fft; + +/** + * A Cosine window function. + * + * @author Damien Di Fede + * @author Corban Brook + * @see The + * Cosine Window + */ +public class CosineWindow extends WindowFunction { + /** Constructs a Cosine window. 
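+	 * The curve is w(i) = cos(PI * i / (N - 1) - PI / 2), i.e. a half period of a
+	 * sine that rises from 0 to 1 and falls back to 0 over the window length N.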
*/ + public CosineWindow() { + } + + protected float value(int length, int index) { + return (float) (Math.cos(Math.PI * index / (length - 1) - Math.PI / 2)); + } +} diff --git a/app/src/main/java/be/tarsos/dsp/util/fft/FFT.java b/app/src/main/java/be/tarsos/dsp/util/fft/FFT.java new file mode 100644 index 0000000..d526e73 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/util/fft/FFT.java @@ -0,0 +1,239 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +package be.tarsos.dsp.util.fft; + + +/** + * Wrapper for calling a hopefully Fast Fourier transform. Makes it easy to + * switch FFT algorithm with minimal overhead. + * Support for window functions is also present. + * + * @author Joren Six + */ +public class FFT { + + /** + * Forward FFT. + */ + private final FloatFFT fft; + private final WindowFunction windowFunction; + private final int fftSize; + private final float[] window; + + /** + * Create a new fft + * @param size of this size + */ + public FFT(final int size) { + this(size,null); + } + + /** + * Create a new fft of the specified size. Apply the specified window on the samples before a forward transform. + * arning: the window is not applied in reverse when a backwards transform is requested. + * @param size The size of the fft. + * @param windowFunction Apply the specified window on the samples before a forward transform. + * arning: the window is not applied in reverse when a backwards transform is requested. + */ + public FFT(final int size, final WindowFunction windowFunction){ + fft = new FloatFFT(size); + fftSize = size; + this.windowFunction = windowFunction; + if(windowFunction==null) + window = null; + else + window = windowFunction.generateCurve(size); + } + + /** + * Computes forward DFT. + * + * @param data + * data to transform. + */ + public void forwardTransform(final float[] data) { + if(windowFunction!=null){ + for(int i = 0 ; i < data.length ; i++){ + data[i] = data[i] * window[i]; + } + //windowFunction.apply(data); + } + fft.realForward(data); + } + + /** + * do a complex forward transform + * @param data do a complex forward transform on these complex numbers + */ + public void complexForwardTransform(final float[] data) { + if(windowFunction!=null){ + for(int i = 0 ; i < data.length ; i++){ + data[i] = data[i] * window[i]; + } + //windowFunction.apply(data); + } + fft.complexForward(data); + } + + /** + * Computes inverse DFT. + * Warning, does not reverse the window function. + * @param data + * data to transform + */ + public void backwardsTransform(final float[] data) { + fft.realInverse(data, true); + } + + /** + * Calculate the frequency of the bin. + * @param binIndex The index of the bin. + * @param sampleRate The sample rate of the audio. + * @return The frequency in Hz of the bin. 
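+	 *         For example, at a sample rate of 44100 Hz with an FFT size of 1024,
+	 *         bin 1 corresponds to 44100 / 1024, about 43.07 Hz, and bin 512 to
+	 *         22050 Hz (the Nyquist frequency).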
+ */ + public double binToHz(final int binIndex, final float sampleRate) { + return binIndex * sampleRate / (double) fftSize; + } + + /** + * The size of the fft. + * @return The size of the fft. + */ + public int size(){ + return fftSize; + } + + /** + * Returns the modulus of the element at index bufferCount. The modulus, + * magnitude or absolute value is (a²+b²) ^ 0.5 with a being the real part + * and b the imaginary part of a complex number. + * + * @param data + * The FFT transformed data. + * @param index + * The index of the element. + * @return The modulus, magnitude or absolute value of the element at index + * bufferCount + */ + public float modulus(final float[] data, final int index) { + final int realIndex = 2 * index; + final int imgIndex = 2 * index + 1; + final float modulus = data[realIndex] * data[realIndex] + data[imgIndex] * data[imgIndex]; + return (float) Math.sqrt(modulus); + } + + /** + * Calculates the the modulus for each element in data and stores the result + * in amplitudes. + * + * @param data + * The input data. + * @param amplitudes + * The output modulus info or amplitude. + */ + public void modulus(final float[] data, final float[] amplitudes) { + assert data.length / 2 == amplitudes.length; + for (int i = 0; i < amplitudes.length; i++) { + amplitudes[i] = modulus(data, i); + } + } + + /** + * Computes an FFT and converts the results to polar coordinates (power and + * phase). Both the power and phase arrays must be the same length, data + * should be double the length. + * + * @param data + * The input audio signal. + * @param power + * The power (modulus) of the data. + * @param phase + * The phase of the data + */ + public void powerPhaseFFT(float[] data,float[] power, float[] phase) { + assert data.length / 2 == power.length; + assert data.length / 2 == phase.length; + if(windowFunction!=null){ + windowFunction.apply(data); + } + fft.realForward(data); + powerAndPhaseFromFFT(data, power, phase); + } + + + /** + * Returns magnitude (or power) and phase for the FFT transformed data. + * @param data The FFT transformed data. + * @param power The array where the magnitudes or powers are going to be stored. It is half the length of data (FFT size). + * @param phase The array where the phases are going to be stored. It is half the length of data (FFT size). + */ + public void powerAndPhaseFromFFT(float[] data,float[] power, float[] phase){ + phase[0] = (float) Math.PI; + power[0] = -data[0]; + for (int i = 1; i < power.length; i++) { + int realIndex = 2 * i; + int imgIndex = 2 * i + 1; + power[i] = (float) Math.sqrt(data[realIndex] * data[realIndex] + data[imgIndex] * data[imgIndex]); + phase[i] = (float) Math.atan2(data[imgIndex], data[realIndex]); + } + } + + /** + * Beatroot expects a different first power element + * @param data The FFT transformed data. + * @param power The array where the magnitudes or powers are going to be stored. It is half the length of data (FFT size). + * @param phase The array where the phases are going to be stored. It is half the length of data (FFT size). + + */ + public void powerPhaseFFTBeatRootOnset(float[] data,float[] power, float[] phase) { + powerPhaseFFT(data, power, phase); + power[0] = (float) Math.sqrt(data[0] * data[0] + data[1] * data[1]); + } + + /** + * Multiplies to arrays containing imaginary numbers. The data in the first argument + * is modified! The real part is stored at 2*i, the imaginary part 2*i+i + * @param data The array with imaginary numbers that is modified. 
+ * @param other The array with imaginary numbers that is not modified. + * Data and other need to be the same length. + */ + public void multiply(float[] data, float[] other){ + assert data.length == other.length; + if(data.length!=other.length){ + throw new IllegalArgumentException("Both arrays with imaginary numbers shouldb e of equal length"); + } + for (int i = 1; i < data.length-1; i+=2) { + int realIndex = i; + int imgIndex = i + 1; + float tempReal = data[realIndex] * other[realIndex] + -1 * data[imgIndex] * other[imgIndex]; + float tempImg = data[realIndex] * other[imgIndex] + data[imgIndex] * other[realIndex]; + data[realIndex] = tempReal; + data[imgIndex] = tempImg; + //fix by perfecthu + //data[realIndex] = data[realIndex] * other[realIndex] + -1 * data[imgIndex] * other[imgIndex]; + //data[imgIndex] = data[realIndex] * other[imgIndex] + data[imgIndex] * other[realIndex]; + } + } +} diff --git a/app/src/main/java/be/tarsos/dsp/util/fft/FloatFFT.java b/app/src/main/java/be/tarsos/dsp/util/fft/FloatFFT.java new file mode 100644 index 0000000..2b3b9c5 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/util/fft/FloatFFT.java @@ -0,0 +1,6603 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +/* ***** BEGIN LICENSE BLOCK ***** + * Version: MPL 1.1/GPL 2.0/LGPL 2.1 + * + * The contents of this file are subject to the Mozilla Public License Version + * 1.1 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * http://www.mozilla.org/MPL/ + * + * Software distributed under the License is distributed on an "AS IS" basis, + * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License + * for the specific language governing rights and limitations under the + * License. + * + * The Original Code is JTransforms. + * + * The Initial Developer of the Original Code is + * Piotr Wendykier, Emory University. + * Portions created by the Initial Developer are Copyright (C) 2007-2009 + * the Initial Developer. All Rights Reserved. + * + * Alternatively, the contents of this file may be used under the terms of + * either the GNU General Public License Version 2 or later (the "GPL"), or + * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), + * in which case the provisions of the GPL or the LGPL are applicable instead + * of those above. If you wish to allow use of your version of this file only + * under the terms of either the GPL or the LGPL, and not to allow others to + * use your version of this file under the terms of the MPL, indicate your + * decision by deleting the provisions above and replace them with the notice + * and other provisions required by the GPL or the LGPL. If you do not delete + * the provisions above, a recipient may use your version of this file under + * the terms of any one of the MPL, the GPL or the LGPL. 
+ * + * ***** END LICENSE BLOCK ***** */ + +package be.tarsos.dsp.util.fft; + +import java.util.concurrent.Future; + +import be.tarsos.dsp.util.ConcurrencyUtils; + + + +/** + * Computes 1D Discrete Fourier Transform (DFT) of complex and real, single + * precision data. The size of the data can be an arbitrary number. This is a + * parallel implementation of split-radix and mixed-radix algorithms optimized + * for SMP systems.
+ *
+ * This code is derived from General Purpose FFT Package written by Takuya Ooura + * (http://www.kurims.kyoto-u.ac.jp/~ooura/fft.html) and from JFFTPack written + * by Baoshe Zhang (http://jfftpack.sourceforge.net/) + * + * @author Piotr Wendykier (piotr.wendykier@gmail.com) + * + */ +public class FloatFFT { + + private enum Plans { + SPLIT_RADIX, MIXED_RADIX, BLUESTEIN + } + + private final int n; + + private int nBluestein; + + private int[] ip; + + private float[] w; + + private int nw; + + private int nc; + + private float[] wtable; + + private float[] wtable_r; + + private float[] bk1; + + private float[] bk2; + + private final Plans plan; + + private static final int[] factors = { 4, 2, 3, 5 }; + + private static final float PI = 3.14159265358979311599796346854418516f; + + private static final float TWO_PI = 6.28318530717958623199592693708837032f; + + /** + * Creates new instance of FloatFFT. + * + * @param n + * size of data + */ + public FloatFFT(int n) { + if (n < 1) { + throw new IllegalArgumentException("n must be greater than 0"); + } + this.n = n; + + if (!ConcurrencyUtils.isPowerOf2(n)) { + if (getReminder(n, factors) >= 211) { + plan = Plans.BLUESTEIN; + nBluestein = ConcurrencyUtils.nextPow2(n * 2 - 1); + bk1 = new float[2 * nBluestein]; + bk2 = new float[2 * nBluestein]; + this.ip = new int[2 + (int) Math.ceil(2 + (1 << (int) (Math.log(nBluestein + 0.5) / Math.log(2)) / 2))]; + this.w = new float[nBluestein]; + int twon = 2 * nBluestein; + nw = ip[0]; + if (twon > (nw << 2)) { + nw = twon >> 2; + makewt(nw); + } + nc = ip[1]; + if (nBluestein > (nc << 2)) { + nc = nBluestein >> 2; + makect(nc, w, nw); + } + bluesteini(); + } else { + plan = Plans.MIXED_RADIX; + wtable = new float[4 * n + 15]; + wtable_r = new float[2 * n + 15]; + cffti(); + rffti(); + } + } else { + plan = Plans.SPLIT_RADIX; + this.ip = new int[2 + (int) Math.ceil(2 + (1 << (int) (Math.log(n + 0.5) / Math.log(2)) / 2))]; + this.w = new float[n]; + int twon = 2 * n; + nw = ip[0]; + if (twon > (nw << 2)) { + nw = twon >> 2; + makewt(nw); + } + nc = ip[1]; + if (n > (nc << 2)) { + nc = n >> 2; + makect(nc, w, nw); + } + } + } + + /** + * Computes 1D forward DFT of complex data leaving the result in + * a. Complex number is stored as two float values in + * sequence: the real and imaginary part, i.e. the size of the input array + * must be greater or equal 2*n. The physical layout of the input data has + * to be as follows:
+ * + *
+     * a[2*k] = Re[k], 
+     * a[2*k+1] = Im[k], 0<=k<n
+     * 
+ * + * @param a + * data to transform + */ + public void complexForward(float[] a) { + complexForward(a, 0); + } + + /** + * Computes 1D forward DFT of complex data leaving the result in + * a. Complex number is stored as two float values in + * sequence: the real and imaginary part, i.e. the size of the input array + * must be greater or equal 2*n. The physical layout of the input data has + * to be as follows:
+ * + *
+     * a[offa+2*k] = Re[k], 
+     * a[offa+2*k+1] = Im[k], 0<=k<n
+     * 
+ * + * @param a + * data to transform + * @param offa + * index of the first element in array a + */ + public void complexForward(float[] a, int offa) { + if (n == 1) + return; + switch (plan) { + case SPLIT_RADIX: + cftbsub(2 * n, a, offa, ip, nw, w); + break; + case MIXED_RADIX: + cfftf(a, offa, -1); + break; + case BLUESTEIN: + bluestein_complex(a, offa, -1); + break; + } + } + + /** + * Computes 1D inverse DFT of complex data leaving the result in + * a. Complex number is stored as two float values in + * sequence: the real and imaginary part, i.e. the size of the input array + * must be greater or equal 2*n. The physical layout of the input data has + * to be as follows:
+ * + *
+     * a[2*k] = Re[k], 
+     * a[2*k+1] = Im[k], 0<=k<n
+     * 
+ * + * @param a + * data to transform + * @param scale + * if true then scaling is performed + */ + public void complexInverse(float[] a, boolean scale) { + complexInverse(a, 0, scale); + } + + /** + * Computes 1D inverse DFT of complex data leaving the result in + * a. Complex number is stored as two float values in + * sequence: the real and imaginary part, i.e. the size of the input array + * must be greater or equal 2*n. The physical layout of the input data has + * to be as follows:
+ * + *
+     * a[offa+2*k] = Re[k], 
+     * a[offa+2*k+1] = Im[k], 0<=k<n
+     * 
+ * + * @param a + * data to transform + * @param offa + * index of the first element in array a + * @param scale + * if true then scaling is performed + */ + public void complexInverse(float[] a, int offa, boolean scale) { + if (n == 1) + return; + switch (plan) { + case SPLIT_RADIX: + cftfsub(2 * n, a, offa, ip, nw, w); + break; + case MIXED_RADIX: + cfftf(a, offa, +1); + break; + case BLUESTEIN: + bluestein_complex(a, offa, 1); + break; + } + if (scale) { + scale(n, a, offa, true); + } + } + + /** + * Computes 1D forward DFT of real data leaving the result in a + * . The physical layout of the output data is as follows:
+ * + * if n is even then + * + *
+     * a[2*k] = Re[k], 0<=k<n/2
+     * a[2*k+1] = Im[k], 0<k<n/2
+     * a[1] = Re[n/2]
+     * 
+ * + * if n is odd then + * + *
+     * a[2*k] = Re[k], 0<=k<(n+1)/2
+     * a[2*k+1] = Im[k], 0<k<(n-1)/2
+     * a[1] = Im[(n-1)/2]
+     * 
+ * + * This method computes only half of the elements of the real transform. The + * other half satisfies the symmetry condition. If you want the full real + * forward transform, use realForwardFull. To get back the + * original data, use realInverse on the output of this method. + * + * @param a + * data to transform + */ + public void realForward(float[] a) { + realForward(a, 0); + } + + /** + * Computes 1D forward DFT of real data leaving the result in a + * . The physical layout of the output data is as follows:
+ * + * if n is even then + * + *
+     * a[offa+2*k] = Re[k], 0<=k<n/2
+     * a[offa+2*k+1] = Im[k], 0<k<n/2
+     * a[offa+1] = Re[n/2]
+     * 
+ * + * if n is odd then + * + *
+     * a[offa+2*k] = Re[k], 0<=k<(n+1)/2
+     * a[offa+2*k+1] = Im[k], 0<k<(n-1)/2
+     * a[offa+1] = Im[(n-1)/2]
+     * 
+ * + * This method computes only half of the elements of the real transform. The + * other half satisfies the symmetry condition. If you want the full real + * forward transform, use realForwardFull. To get back the + * original data, use realInverse on the output of this method. + * + * @param a + * data to transform + * @param offa + * index of the first element in array a + */ + public void realForward(float[] a, int offa) { + if (n == 1) + return; + + switch (plan) { + case SPLIT_RADIX: + float xi; + + if (n > 4) { + cftfsub(n, a, offa, ip, nw, w); + rftfsub(n, a, offa, nc, w, nw); + } else if (n == 4) { + cftx020(a, offa); + } + xi = a[offa] - a[offa + 1]; + a[offa] += a[offa + 1]; + a[offa + 1] = xi; + break; + case MIXED_RADIX: + rfftf(a, offa); + for (int k = n - 1; k >= 2; k--) { + int idx = offa + k; + float tmp = a[idx]; + a[idx] = a[idx - 1]; + a[idx - 1] = tmp; + } + break; + case BLUESTEIN: + bluestein_real_forward(a, offa); + break; + } + } + + /** + * Computes 1D forward DFT of real data leaving the result in a + * . This method computes the full real forward transform, i.e. you will get + * the same result as from complexForward called with all + * imaginary parts equal 0. Because the result is stored in a, + * the size of the input array must greater or equal 2*n, with only the + * first n elements filled with real data. To get back the original data, + * use complexInverse on the output of this method. + * + * @param a + * data to transform + */ + public void realForwardFull(float[] a) { + realForwardFull(a, 0); + } + + /** + * Computes 1D forward DFT of real data leaving the result in a + * . This method computes the full real forward transform, i.e. you will get + * the same result as from complexForward called with all + * imaginary part equal 0. Because the result is stored in a, + * the size of the input array must greater or equal 2*n, with only the + * first n elements filled with real data. To get back the original data, + * use complexInverse on the output of this method. + * + * @param a + * data to transform + * @param offa + * index of the first element in array a + */ + public void realForwardFull(final float[] a, final int offa) { + + final int twon = 2 * n; + switch (plan) { + case SPLIT_RADIX: + realForward(a, offa); + int nthreads = ConcurrencyUtils.getNumberOfThreads(); + if ((nthreads > 1) && (n / 2 > ConcurrencyUtils.getThreadsBeginN_1D_FFT_2Threads())) { + Future[] futures = new Future[nthreads]; + int k = n / 2 / nthreads; + for (int i = 0; i < nthreads; i++) { + final int firstIdx = i * k; + final int lastIdx = (i == (nthreads - 1)) ? 
n / 2 : firstIdx + k; + futures[i] = ConcurrencyUtils.submit(new Runnable() { + public void run() { + int idx1, idx2; + for (int k = firstIdx; k < lastIdx; k++) { + idx1 = 2 * k; + idx2 = offa + ((twon - idx1) % twon); + a[idx2] = a[offa + idx1]; + a[idx2 + 1] = -a[offa + idx1 + 1]; + } + } + }); + } + ConcurrencyUtils.waitForCompletion(futures); + } else { + int idx1, idx2; + for (int k = 0; k < n / 2; k++) { + idx1 = 2 * k; + idx2 = offa + ((twon - idx1) % twon); + a[idx2] = a[offa + idx1]; + a[idx2 + 1] = -a[offa + idx1 + 1]; + } + } + a[offa + n] = -a[offa + 1]; + a[offa + 1] = 0; + break; + case MIXED_RADIX: + rfftf(a, offa); + int m; + if (n % 2 == 0) { + m = n / 2; + } else { + m = (n + 1) / 2; + } + for (int k = 1; k < m; k++) { + int idx1 = offa + twon - 2 * k; + int idx2 = offa + 2 * k; + a[idx1 + 1] = -a[idx2]; + a[idx1] = a[idx2 - 1]; + } + for (int k = 1; k < n; k++) { + int idx = offa + n - k; + float tmp = a[idx + 1]; + a[idx + 1] = a[idx]; + a[idx] = tmp; + } + a[offa + 1] = 0; + break; + case BLUESTEIN: + bluestein_real_full(a, offa, -1); + break; + } + } + + /** + * Computes 1D inverse DFT of real data leaving the result in a + * . The physical layout of the input data has to be as follows:
+ * + * if n is even then + * + *
+     * a[2*k] = Re[k], 0<=k<n/2
+     * a[2*k+1] = Im[k], 0<k<n/2
+     * a[1] = Re[n/2]
+     * 
+ * + * if n is odd then + * + *
+     * a[2*k] = Re[k], 0<=k<(n+1)/2
+     * a[2*k+1] = Im[k], 0<k<(n-1)/2
+     * a[1] = Im[(n-1)/2]
+     * 
+ * + * This method computes only half of the elements of the real transform. The + * other half satisfies the symmetry condition. If you want the full real + * inverse transform, use realInverseFull. + * + * @param a + * data to transform + * + * @param scale + * if true then scaling is performed + * + */ + public void realInverse(float[] a, boolean scale) { + realInverse(a, 0, scale); + } + + /** + * Computes 1D inverse DFT of real data leaving the result in a + * . The physical layout of the input data has to be as follows:
+ * + * if n is even then + * + *
+     * a[offa+2*k] = Re[k], 0<=k<n/2
+     * a[offa+2*k+1] = Im[k], 0<k<n/2
+     * a[offa+1] = Re[n/2]
+     * 
+ * + * if n is odd then + * + *
+     * a[offa+2*k] = Re[k], 0<=k<(n+1)/2
+     * a[offa+2*k+1] = Im[k], 0<k<(n-1)/2
+     * a[offa+1] = Im[(n-1)/2]
+     * 
+ * + * This method computes only half of the elements of the real transform. The + * other half satisfies the symmetry condition. If you want the full real + * inverse transform, use realInverseFull. + * + * @param a + * data to transform + * @param offa + * index of the first element in array a + * @param scale + * if true then scaling is performed + * + */ + public void realInverse(float[] a, int offa, boolean scale) { + if (n == 1) + return; + switch (plan) { + case SPLIT_RADIX: + a[offa + 1] = (float)(0.5 * (a[offa] - a[offa + 1])); + a[offa] -= a[offa + 1]; + if (n > 4) { + rftfsub(n, a, offa, nc, w, nw); + cftbsub(n, a, offa, ip, nw, w); + } else if (n == 4) { + cftxc020(a, offa); + } + if (scale) { + scale(n / 2, a, offa, false); + } + break; + case MIXED_RADIX: + for (int k = 2; k < n; k++) { + int idx = offa + k; + float tmp = a[idx - 1]; + a[idx - 1] = a[idx]; + a[idx] = tmp; + } + rfftb(a, offa); + if (scale) { + scale(n, a, offa, false); + } + break; + case BLUESTEIN: + bluestein_real_inverse(a, offa); + if (scale) { + scale(n, a, offa, false); + } + break; + } + + } + + /** + * Computes 1D inverse DFT of real data leaving the result in a + * . This method computes the full real inverse transform, i.e. you will get + * the same result as from complexInverse called with all + * imaginary part equal 0. Because the result is stored in a, + * the size of the input array must greater or equal 2*n, with only the + * first n elements filled with real data. + * + * @param a + * data to transform + * @param scale + * if true then scaling is performed + */ + public void realInverseFull(float[] a, boolean scale) { + realInverseFull(a, 0, scale); + } + + /** + * Computes 1D inverse DFT of real data leaving the result in a + * . This method computes the full real inverse transform, i.e. you will get + * the same result as from complexInverse called with all + * imaginary part equal 0. Because the result is stored in a, + * the size of the input array must greater or equal 2*n, with only the + * first n elements filled with real data. + * + * @param a + * data to transform + * @param offa + * index of the first element in array a + * @param scale + * if true then scaling is performed + */ + public void realInverseFull(final float[] a, final int offa, boolean scale) { + final int twon = 2 * n; + switch (plan) { + case SPLIT_RADIX: + realInverse2(a, offa, scale); + int nthreads = ConcurrencyUtils.getNumberOfThreads(); + if ((nthreads > 1) && (n / 2 > ConcurrencyUtils.getThreadsBeginN_1D_FFT_2Threads())) { + Future[] futures = new Future[nthreads]; + int k = n / 2 / nthreads; + for (int i = 0; i < nthreads; i++) { + final int firstIdx = i * k; + final int lastIdx = (i == (nthreads - 1)) ? 
n / 2 : firstIdx + k; + futures[i] = ConcurrencyUtils.submit(new Runnable() { + public void run() { + int idx1, idx2; + for (int k = firstIdx; k < lastIdx; k++) { + idx1 = 2 * k; + idx2 = offa + ((twon - idx1) % twon); + a[idx2] = a[offa + idx1]; + a[idx2 + 1] = -a[offa + idx1 + 1]; + } + } + }); + } + ConcurrencyUtils.waitForCompletion(futures); + } else { + int idx1, idx2; + for (int k = 0; k < n / 2; k++) { + idx1 = 2 * k; + idx2 = offa + ((twon - idx1) % twon); + a[idx2] = a[offa + idx1]; + a[idx2 + 1] = -a[offa + idx1 + 1]; + } + } + a[offa + n] = -a[offa + 1]; + a[offa + 1] = 0; + break; + case MIXED_RADIX: + rfftf(a, offa); + if (scale) { + scale(n, a, offa, false); + } + int m; + if (n % 2 == 0) { + m = n / 2; + } else { + m = (n + 1) / 2; + } + for (int k = 1; k < m; k++) { + int idx1 = offa + 2 * k; + int idx2 = offa + twon - 2 * k; + a[idx1] = -a[idx1]; + a[idx2 + 1] = -a[idx1]; + a[idx2] = a[idx1 - 1]; + } + for (int k = 1; k < n; k++) { + int idx = offa + n - k; + float tmp = a[idx + 1]; + a[idx + 1] = a[idx]; + a[idx] = tmp; + } + a[offa + 1] = 0; + break; + case BLUESTEIN: + bluestein_real_full(a, offa, 1); + if (scale) { + scale(n, a, offa, true); + } + break; + } + } + + private void realInverse2(float[] a, int offa, boolean scale) { + if (n == 1) + return; + switch (plan) { + case SPLIT_RADIX: + float xi; + + if (n > 4) { + cftfsub(n, a, offa, ip, nw, w); + rftbsub(n, a, offa, nc, w, nw); + } else if (n == 4) { + cftbsub(n, a, offa, ip, nw, w); + } + xi = a[offa] - a[offa + 1]; + a[offa] += a[offa + 1]; + a[offa + 1] = xi; + if (scale) { + scale(n, a, offa, false); + } + break; + case MIXED_RADIX: + rfftf(a, offa); + for (int k = n - 1; k >= 2; k--) { + int idx = offa + k; + float tmp = a[idx]; + a[idx] = a[idx - 1]; + a[idx - 1] = tmp; + } + if (scale) { + scale(n, a, offa, false); + } + int m; + if (n % 2 == 0) { + m = n / 2; + for (int i = 1; i < m; i++) { + int idx = offa + 2 * i + 1; + a[idx] = -a[idx]; + } + } else { + m = (n - 1) / 2; + for (int i = 0; i < m; i++) { + int idx = offa + 2 * i + 1; + a[idx] = -a[idx]; + } + } + break; + case BLUESTEIN: + bluestein_real_inverse2(a, offa); + if (scale) { + scale(n, a, offa, false); + } + break; + } + } + + private static int getReminder(int n, int[] factors) { + int reminder = n; + + if (n <= 0) { + throw new IllegalArgumentException("n must be positive integer"); + } + + for (int i = 0; i < factors.length && reminder != 1; i++) { + int factor = factors[i]; + while ((reminder % factor) == 0) { + reminder /= factor; + } + } + return reminder; + } + + /* -------- initializing routines -------- */ + + /*--------------------------------------------------------- + cffti: initialization of Complex FFT + --------------------------------------------------------*/ + + void cffti(int n, int offw) { + if (n == 1) + return; + + final int twon = 2 * n; + final int fourn = 4 * n; + float argh; + int idot, ntry = 0, i, j; + float argld; + int i1, k1, l1, l2, ib; + float fi; + int ld, ii, nf, ip, nl, nq, nr; + float arg; + int ido, ipm; + + nl = n; + nf = 0; + j = 0; + + factorize_loop: while (true) { + j++; + if (j <= 4) + ntry = factors[j - 1]; + else + ntry += 2; + do { + nq = nl / ntry; + nr = nl - ntry * nq; + if (nr != 0) + continue factorize_loop; + nf++; + wtable[offw + nf + 1 + fourn] = ntry; + nl = nq; + if (ntry == 2 && nf != 1) { + for (i = 2; i <= nf; i++) { + ib = nf - i + 2; + int idx = ib + fourn; + wtable[offw + idx + 1] = wtable[offw + idx]; + } + wtable[offw + 2 + fourn] = 2; + } + } while (nl != 1); + break; + } + 
wtable[offw + fourn] = n; + wtable[offw + 1 + fourn] = nf; + argh = TWO_PI / (float) n; + i = 1; + l1 = 1; + for (k1 = 1; k1 <= nf; k1++) { + ip = (int) wtable[offw + k1 + 1 + fourn]; + ld = 0; + l2 = l1 * ip; + ido = n / l2; + idot = ido + ido + 2; + ipm = ip - 1; + for (j = 1; j <= ipm; j++) { + i1 = i; + wtable[offw + i - 1 + twon] = 1; + wtable[offw + i + twon] = 0; + ld += l1; + fi = 0; + argld = ld * argh; + for (ii = 4; ii <= idot; ii += 2) { + i += 2; + fi += 1; + arg = fi * argld; + int idx = i + twon; + wtable[offw + idx - 1] = (float)Math.cos(arg); + wtable[offw + idx] = (float)Math.sin(arg); + } + if (ip > 5) { + int idx1 = i1 + twon; + int idx2 = i + twon; + wtable[offw + idx1 - 1] = wtable[offw + idx2 - 1]; + wtable[offw + idx1] = wtable[offw + idx2]; + } + } + l1 = l2; + } + + } + + void cffti() { + if (n == 1) + return; + + final int twon = 2 * n; + final int fourn = 4 * n; + float argh; + int idot, ntry = 0, i, j; + float argld; + int i1, k1, l1, l2, ib; + float fi; + int ld, ii, nf, ip, nl, nq, nr; + float arg; + int ido, ipm; + + nl = n; + nf = 0; + j = 0; + + factorize_loop: while (true) { + j++; + if (j <= 4) + ntry = factors[j - 1]; + else + ntry += 2; + do { + nq = nl / ntry; + nr = nl - ntry * nq; + if (nr != 0) + continue factorize_loop; + nf++; + wtable[nf + 1 + fourn] = ntry; + nl = nq; + if (ntry == 2 && nf != 1) { + for (i = 2; i <= nf; i++) { + ib = nf - i + 2; + int idx = ib + fourn; + wtable[idx + 1] = wtable[idx]; + } + wtable[2 + fourn] = 2; + } + } while (nl != 1); + break; + } + wtable[fourn] = n; + wtable[1 + fourn] = nf; + argh = TWO_PI / (float) n; + i = 1; + l1 = 1; + for (k1 = 1; k1 <= nf; k1++) { + ip = (int) wtable[k1 + 1 + fourn]; + ld = 0; + l2 = l1 * ip; + ido = n / l2; + idot = ido + ido + 2; + ipm = ip - 1; + for (j = 1; j <= ipm; j++) { + i1 = i; + wtable[i - 1 + twon] = 1; + wtable[i + twon] = 0; + ld += l1; + fi = 0; + argld = ld * argh; + for (ii = 4; ii <= idot; ii += 2) { + i += 2; + fi += 1; + arg = fi * argld; + int idx = i + twon; + wtable[idx - 1] = (float)Math.cos(arg); + wtable[idx] = (float)Math.sin(arg); + } + if (ip > 5) { + int idx1 = i1 + twon; + int idx2 = i + twon; + wtable[idx1 - 1] = wtable[idx2 - 1]; + wtable[idx1] = wtable[idx2]; + } + } + l1 = l2; + } + + } + + void rffti() { + + if (n == 1) + return; + final int twon = 2 * n; + float argh; + int ntry = 0, i, j; + float argld; + int k1, l1, l2, ib; + float fi; + int ld, ii, nf, ip, nl, is, nq, nr; + float arg; + int ido, ipm; + int nfm1; + + nl = n; + nf = 0; + j = 0; + + factorize_loop: while (true) { + ++j; + if (j <= 4) + ntry = factors[j - 1]; + else + ntry += 2; + do { + nq = nl / ntry; + nr = nl - ntry * nq; + if (nr != 0) + continue factorize_loop; + ++nf; + wtable_r[nf + 1 + twon] = ntry; + + nl = nq; + if (ntry == 2 && nf != 1) { + for (i = 2; i <= nf; i++) { + ib = nf - i + 2; + int idx = ib + twon; + wtable_r[idx + 1] = wtable_r[idx]; + } + wtable_r[2 + twon] = 2; + } + } while (nl != 1); + break; + } + wtable_r[twon] = n; + wtable_r[1 + twon] = nf; + argh = TWO_PI / (float) (n); + is = 0; + nfm1 = nf - 1; + l1 = 1; + if (nfm1 == 0) + return; + for (k1 = 1; k1 <= nfm1; k1++) { + ip = (int) wtable_r[k1 + 1 + twon]; + ld = 0; + l2 = l1 * ip; + ido = n / l2; + ipm = ip - 1; + for (j = 1; j <= ipm; ++j) { + ld += l1; + i = is; + argld = (float) ld * argh; + + fi = 0; + for (ii = 3; ii <= ido; ii += 2) { + i += 2; + fi += 1; + arg = fi * argld; + int idx = i + n; + wtable_r[idx - 2] = (float)Math.cos(arg); + wtable_r[idx - 1] = (float)Math.sin(arg); + } + is += 
ido; + } + l1 = l2; + } + } + + private void bluesteini() { + int k = 0; + float arg; + float pi_n = PI / n; + bk1[0] = 1; + bk1[1] = 0; + for (int i = 1; i < n; i++) { + k += 2 * i - 1; + if (k >= 2 * n) + k -= 2 * n; + arg = pi_n * k; + bk1[2 * i] = (float)Math.cos(arg); + bk1[2 * i + 1] = (float)Math.sin(arg); + } + float scale = (float)(1.0 / nBluestein); + bk2[0] = bk1[0] * scale; + bk2[1] = bk1[1] * scale; + for (int i = 2; i < 2 * n; i += 2) { + bk2[i] = bk1[i] * scale; + bk2[i + 1] = bk1[i + 1] * scale; + bk2[2 * nBluestein - i] = bk2[i]; + bk2[2 * nBluestein - i + 1] = bk2[i + 1]; + } + cftbsub(2 * nBluestein, bk2, 0, ip, nw, w); + } + + private void makewt(int nw) { + int j, nwh, nw0, nw1; + float delta, wn4r, wk1r, wk1i, wk3r, wk3i; + float delta2, deltaj, deltaj3; + + ip[0] = nw; + ip[1] = 1; + if (nw > 2) { + nwh = nw >> 1; + delta = (float)(0.785398163397448278999490867136046290 / nwh); + delta2 = delta * 2; + wn4r = (float)Math.cos(delta * nwh); + w[0] = 1; + w[1] = wn4r; + if (nwh == 4) { + w[2] = (float)Math.cos(delta2); + w[3] = (float)Math.sin(delta2); + } else if (nwh > 4) { + makeipt(nw); + w[2] = (float)(0.5 / Math.cos(delta2)); + w[3] = (float)(0.5 / Math.cos(delta * 6)); + for (j = 4; j < nwh; j += 4) { + deltaj = delta * j; + deltaj3 = 3 * deltaj; + w[j] = (float)Math.cos(deltaj); + w[j + 1] = (float)Math.sin(deltaj); + w[j + 2] = (float)Math.cos(deltaj3); + w[j + 3] = (float)-Math.sin(deltaj3); + } + } + nw0 = 0; + while (nwh > 2) { + nw1 = nw0 + nwh; + nwh >>= 1; + w[nw1] = 1; + w[nw1 + 1] = wn4r; + if (nwh == 4) { + wk1r = w[nw0 + 4]; + wk1i = w[nw0 + 5]; + w[nw1 + 2] = wk1r; + w[nw1 + 3] = wk1i; + } else if (nwh > 4) { + wk1r = w[nw0 + 4]; + wk3r = w[nw0 + 6]; + w[nw1 + 2] = (float)(0.5 / wk1r); + w[nw1 + 3] = (float)(0.5 / wk3r); + for (j = 4; j < nwh; j += 4) { + int idx1 = nw0 + 2 * j; + int idx2 = nw1 + j; + wk1r = w[idx1]; + wk1i = w[idx1 + 1]; + wk3r = w[idx1 + 2]; + wk3i = w[idx1 + 3]; + w[idx2] = wk1r; + w[idx2 + 1] = wk1i; + w[idx2 + 2] = wk3r; + w[idx2 + 3] = wk3i; + } + } + nw0 = nw1; + } + } + } + + private void makeipt(int nw) { + int j, l, m, m2, p, q; + + ip[2] = 0; + ip[3] = 16; + m = 2; + for (l = nw; l > 32; l >>= 2) { + m2 = m << 1; + q = m2 << 3; + for (j = m; j < m2; j++) { + p = ip[j] << 2; + ip[m + j] = p; + ip[m2 + j] = p + q; + } + m = m2; + } + } + + private void makect(int nc, float[] c, int startc) { + int j, nch; + float delta, deltaj; + + ip[1] = nc; + if (nc > 1) { + nch = nc >> 1; + delta = (float)(0.785398163397448278999490867136046290 / nch); + c[startc] = (float)Math.cos(delta * nch); + c[startc + nch] = (float)(0.5 * c[startc]); + for (j = 1; j < nch; j++) { + deltaj = delta * j; + c[startc + j] = (float)(0.5 * Math.cos(deltaj)); + c[startc + nc - j] = (float)(0.5 * Math.sin(deltaj)); + } + } + } + + private void bluestein_complex(final float[] a, final int offa, final int isign) { + final float[] ak = new float[2 * nBluestein]; + int nthreads = ConcurrencyUtils.getNumberOfThreads(); + if ((nthreads > 1) && (n > ConcurrencyUtils.getThreadsBeginN_1D_FFT_2Threads())) { + nthreads = 2; + if ((nthreads >= 4) && (n > ConcurrencyUtils.getThreadsBeginN_1D_FFT_4Threads())) { + nthreads = 4; + } + Future[] futures = new Future[nthreads]; + int k = n / nthreads; + for (int i = 0; i < nthreads; i++) { + final int firstIdx = i * k; + final int lastIdx = (i == (nthreads - 1)) ? 
n : firstIdx + k; + futures[i] = ConcurrencyUtils.submit(new Runnable() { + public void run() { + if (isign > 0) { + for (int i = firstIdx; i < lastIdx; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + int idx3 = offa + idx1; + int idx4 = offa + idx2; + ak[idx1] = a[idx3] * bk1[idx1] - a[idx4] * bk1[idx2]; + ak[idx2] = a[idx3] * bk1[idx2] + a[idx4] * bk1[idx1]; + } + } else { + for (int i = firstIdx; i < lastIdx; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + int idx3 = offa + idx1; + int idx4 = offa + idx2; + ak[idx1] = a[idx3] * bk1[idx1] + a[idx4] * bk1[idx2]; + ak[idx2] = -a[idx3] * bk1[idx2] + a[idx4] * bk1[idx1]; + } + } + } + }); + } + ConcurrencyUtils.waitForCompletion(futures); + + cftbsub(2 * nBluestein, ak, 0, ip, nw, w); + + k = nBluestein / nthreads; + for (int i = 0; i < nthreads; i++) { + final int firstIdx = i * k; + final int lastIdx = (i == (nthreads - 1)) ? nBluestein : firstIdx + k; + futures[i] = ConcurrencyUtils.submit(new Runnable() { + public void run() { + if (isign > 0) { + for (int i = firstIdx; i < lastIdx; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + float im = -ak[idx1] * bk2[idx2] + ak[idx2] * bk2[idx1]; + ak[idx1] = ak[idx1] * bk2[idx1] + ak[idx2] * bk2[idx2]; + ak[idx2] = im; + } + } else { + for (int i = firstIdx; i < lastIdx; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + float im = ak[idx1] * bk2[idx2] + ak[idx2] * bk2[idx1]; + ak[idx1] = ak[idx1] * bk2[idx1] - ak[idx2] * bk2[idx2]; + ak[idx2] = im; + } + } + } + }); + } + ConcurrencyUtils.waitForCompletion(futures); + + cftfsub(2 * nBluestein, ak, 0, ip, nw, w); + + k = n / nthreads; + for (int i = 0; i < nthreads; i++) { + final int firstIdx = i * k; + final int lastIdx = (i == (nthreads - 1)) ? n : firstIdx + k; + futures[i] = ConcurrencyUtils.submit(new Runnable() { + public void run() { + if (isign > 0) { + for (int i = firstIdx; i < lastIdx; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + int idx3 = offa + idx1; + int idx4 = offa + idx2; + a[idx3] = bk1[idx1] * ak[idx1] - bk1[idx2] * ak[idx2]; + a[idx4] = bk1[idx2] * ak[idx1] + bk1[idx1] * ak[idx2]; + } + } else { + for (int i = firstIdx; i < lastIdx; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + int idx3 = offa + idx1; + int idx4 = offa + idx2; + a[idx3] = bk1[idx1] * ak[idx1] + bk1[idx2] * ak[idx2]; + a[idx4] = -bk1[idx2] * ak[idx1] + bk1[idx1] * ak[idx2]; + } + } + } + }); + } + ConcurrencyUtils.waitForCompletion(futures); + } else { + if (isign > 0) { + for (int i = 0; i < n; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + int idx3 = offa + idx1; + int idx4 = offa + idx2; + ak[idx1] = a[idx3] * bk1[idx1] - a[idx4] * bk1[idx2]; + ak[idx2] = a[idx3] * bk1[idx2] + a[idx4] * bk1[idx1]; + } + } else { + for (int i = 0; i < n; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + int idx3 = offa + idx1; + int idx4 = offa + idx2; + ak[idx1] = a[idx3] * bk1[idx1] + a[idx4] * bk1[idx2]; + ak[idx2] = -a[idx3] * bk1[idx2] + a[idx4] * bk1[idx1]; + } + } + + cftbsub(2 * nBluestein, ak, 0, ip, nw, w); + + if (isign > 0) { + for (int i = 0; i < nBluestein; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + float im = -ak[idx1] * bk2[idx2] + ak[idx2] * bk2[idx1]; + ak[idx1] = ak[idx1] * bk2[idx1] + ak[idx2] * bk2[idx2]; + ak[idx2] = im; + } + } else { + for (int i = 0; i < nBluestein; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + float im = ak[idx1] * bk2[idx2] + ak[idx2] * bk2[idx1]; + ak[idx1] = ak[idx1] * bk2[idx1] - ak[idx2] * bk2[idx2]; + ak[idx2] = im; + } + } + + cftfsub(2 * nBluestein, ak, 0, ip, nw, w); + if 
(isign > 0) { + for (int i = 0; i < n; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + int idx3 = offa + idx1; + int idx4 = offa + idx2; + a[idx3] = bk1[idx1] * ak[idx1] - bk1[idx2] * ak[idx2]; + a[idx4] = bk1[idx2] * ak[idx1] + bk1[idx1] * ak[idx2]; + } + } else { + for (int i = 0; i < n; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + int idx3 = offa + idx1; + int idx4 = offa + idx2; + a[idx3] = bk1[idx1] * ak[idx1] + bk1[idx2] * ak[idx2]; + a[idx4] = -bk1[idx2] * ak[idx1] + bk1[idx1] * ak[idx2]; + } + } + } + } + + private void bluestein_real_full(final float[] a, final int offa, final int isign) { + final float[] ak = new float[2 * nBluestein]; + int nthreads = ConcurrencyUtils.getNumberOfThreads(); + if ((nthreads > 1) && (n > ConcurrencyUtils.getThreadsBeginN_1D_FFT_2Threads())) { + nthreads = 2; + if ((nthreads >= 4) && (n > ConcurrencyUtils.getThreadsBeginN_1D_FFT_4Threads())) { + nthreads = 4; + } + Future[] futures = new Future[nthreads]; + int k = n / nthreads; + for (int i = 0; i < nthreads; i++) { + final int firstIdx = i * k; + final int lastIdx = (i == (nthreads - 1)) ? n : firstIdx + k; + futures[i] = ConcurrencyUtils.submit(new Runnable() { + public void run() { + if (isign > 0) { + for (int i = firstIdx; i < lastIdx; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + int idx3 = offa + i; + ak[idx1] = a[idx3] * bk1[idx1]; + ak[idx2] = a[idx3] * bk1[idx2]; + } + } else { + for (int i = firstIdx; i < lastIdx; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + int idx3 = offa + i; + ak[idx1] = a[idx3] * bk1[idx1]; + ak[idx2] = -a[idx3] * bk1[idx2]; + } + } + } + }); + } + ConcurrencyUtils.waitForCompletion(futures); + + cftbsub(2 * nBluestein, ak, 0, ip, nw, w); + + k = nBluestein / nthreads; + for (int i = 0; i < nthreads; i++) { + final int firstIdx = i * k; + final int lastIdx = (i == (nthreads - 1)) ? nBluestein : firstIdx + k; + futures[i] = ConcurrencyUtils.submit(new Runnable() { + public void run() { + if (isign > 0) { + for (int i = firstIdx; i < lastIdx; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + float im = -ak[idx1] * bk2[idx2] + ak[idx2] * bk2[idx1]; + ak[idx1] = ak[idx1] * bk2[idx1] + ak[idx2] * bk2[idx2]; + ak[idx2] = im; + } + } else { + for (int i = firstIdx; i < lastIdx; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + float im = ak[idx1] * bk2[idx2] + ak[idx2] * bk2[idx1]; + ak[idx1] = ak[idx1] * bk2[idx1] - ak[idx2] * bk2[idx2]; + ak[idx2] = im; + } + } + } + }); + } + ConcurrencyUtils.waitForCompletion(futures); + + cftfsub(2 * nBluestein, ak, 0, ip, nw, w); + + k = n / nthreads; + for (int i = 0; i < nthreads; i++) { + final int firstIdx = i * k; + final int lastIdx = (i == (nthreads - 1)) ? 
n : firstIdx + k; + futures[i] = ConcurrencyUtils.submit(new Runnable() { + public void run() { + if (isign > 0) { + for (int i = firstIdx; i < lastIdx; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + a[offa + idx1] = bk1[idx1] * ak[idx1] - bk1[idx2] * ak[idx2]; + a[offa + idx2] = bk1[idx2] * ak[idx1] + bk1[idx1] * ak[idx2]; + } + } else { + for (int i = firstIdx; i < lastIdx; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + a[offa + idx1] = bk1[idx1] * ak[idx1] + bk1[idx2] * ak[idx2]; + a[offa + idx2] = -bk1[idx2] * ak[idx1] + bk1[idx1] * ak[idx2]; + } + } + } + }); + } + ConcurrencyUtils.waitForCompletion(futures); + } else { + if (isign > 0) { + for (int i = 0; i < n; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + int idx3 = offa + i; + ak[idx1] = a[idx3] * bk1[idx1]; + ak[idx2] = a[idx3] * bk1[idx2]; + } + } else { + for (int i = 0; i < n; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + int idx3 = offa + i; + ak[idx1] = a[idx3] * bk1[idx1]; + ak[idx2] = -a[idx3] * bk1[idx2]; + } + } + + cftbsub(2 * nBluestein, ak, 0, ip, nw, w); + + if (isign > 0) { + for (int i = 0; i < nBluestein; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + float im = -ak[idx1] * bk2[idx2] + ak[idx2] * bk2[idx1]; + ak[idx1] = ak[idx1] * bk2[idx1] + ak[idx2] * bk2[idx2]; + ak[idx2] = im; + } + } else { + for (int i = 0; i < nBluestein; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + float im = ak[idx1] * bk2[idx2] + ak[idx2] * bk2[idx1]; + ak[idx1] = ak[idx1] * bk2[idx1] - ak[idx2] * bk2[idx2]; + ak[idx2] = im; + } + } + + cftfsub(2 * nBluestein, ak, 0, ip, nw, w); + + if (isign > 0) { + for (int i = 0; i < n; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + a[offa + idx1] = bk1[idx1] * ak[idx1] - bk1[idx2] * ak[idx2]; + a[offa + idx2] = bk1[idx2] * ak[idx1] + bk1[idx1] * ak[idx2]; + } + } else { + for (int i = 0; i < n; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + a[offa + idx1] = bk1[idx1] * ak[idx1] + bk1[idx2] * ak[idx2]; + a[offa + idx2] = -bk1[idx2] * ak[idx1] + bk1[idx1] * ak[idx2]; + } + } + } + } + + private void bluestein_real_forward(final float[] a, final int offa) { + final float[] ak = new float[2 * nBluestein]; + int nthreads = ConcurrencyUtils.getNumberOfThreads(); + if ((nthreads > 1) && (n > ConcurrencyUtils.getThreadsBeginN_1D_FFT_2Threads())) { + nthreads = 2; + if ((nthreads >= 4) && (n > ConcurrencyUtils.getThreadsBeginN_1D_FFT_4Threads())) { + nthreads = 4; + } + Future[] futures = new Future[nthreads]; + int k = n / nthreads; + for (int i = 0; i < nthreads; i++) { + final int firstIdx = i * k; + final int lastIdx = (i == (nthreads - 1)) ? n : firstIdx + k; + futures[i] = ConcurrencyUtils.submit(new Runnable() { + public void run() { + for (int i = firstIdx; i < lastIdx; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + int idx3 = offa + i; + ak[idx1] = a[idx3] * bk1[idx1]; + ak[idx2] = -a[idx3] * bk1[idx2]; + } + } + }); + } + ConcurrencyUtils.waitForCompletion(futures); + + cftbsub(2 * nBluestein, ak, 0, ip, nw, w); + + k = nBluestein / nthreads; + for (int i = 0; i < nthreads; i++) { + final int firstIdx = i * k; + final int lastIdx = (i == (nthreads - 1)) ? 
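/*
 * bluestein_real_full uses the same chirp-z scheme, but the input is purely real
 * (only a[offa + i] is read), so the first stage is simply
 * ak[2i] = x[i]*Re(b[i]) and ak[2i+1] = +/- x[i]*Im(b[i]). Its result is the full
 * complex spectrum, written back as 2n interleaved floats
 * (Re X[0], Im X[0], Re X[1], ...), unlike the packed half-spectrum produced by
 * bluestein_real_forward below.
 */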
nBluestein : firstIdx + k; + futures[i] = ConcurrencyUtils.submit(new Runnable() { + public void run() { + for (int i = firstIdx; i < lastIdx; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + float im = ak[idx1] * bk2[idx2] + ak[idx2] * bk2[idx1]; + ak[idx1] = ak[idx1] * bk2[idx1] - ak[idx2] * bk2[idx2]; + ak[idx2] = im; + } + } + }); + } + ConcurrencyUtils.waitForCompletion(futures); + + } else { + + for (int i = 0; i < n; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + int idx3 = offa + i; + ak[idx1] = a[idx3] * bk1[idx1]; + ak[idx2] = -a[idx3] * bk1[idx2]; + } + + cftbsub(2 * nBluestein, ak, 0, ip, nw, w); + + for (int i = 0; i < nBluestein; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + float im = ak[idx1] * bk2[idx2] + ak[idx2] * bk2[idx1]; + ak[idx1] = ak[idx1] * bk2[idx1] - ak[idx2] * bk2[idx2]; + ak[idx2] = im; + } + } + + cftfsub(2 * nBluestein, ak, 0, ip, nw, w); + + if (n % 2 == 0) { + a[offa] = bk1[0] * ak[0] + bk1[1] * ak[1]; + a[offa + 1] = bk1[n] * ak[n] + bk1[n + 1] * ak[n + 1]; + for (int i = 1; i < n / 2; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + a[offa + idx1] = bk1[idx1] * ak[idx1] + bk1[idx2] * ak[idx2]; + a[offa + idx2] = -bk1[idx2] * ak[idx1] + bk1[idx1] * ak[idx2]; + } + } else { + a[offa] = bk1[0] * ak[0] + bk1[1] * ak[1]; + a[offa + 1] = -bk1[n] * ak[n - 1] + bk1[n - 1] * ak[n]; + for (int i = 1; i < (n - 1) / 2; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + a[offa + idx1] = bk1[idx1] * ak[idx1] + bk1[idx2] * ak[idx2]; + a[offa + idx2] = -bk1[idx2] * ak[idx1] + bk1[idx1] * ak[idx2]; + } + a[offa + n - 1] = bk1[n - 1] * ak[n - 1] + bk1[n] * ak[n]; + } + + } + + private void bluestein_real_inverse(final float[] a, final int offa) { + final float[] ak = new float[2 * nBluestein]; + if (n % 2 == 0) { + ak[0] = a[offa] * bk1[0]; + ak[1] = a[offa] * bk1[1]; + + for (int i = 1; i < n / 2; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + int idx3 = offa + idx1; + int idx4 = offa + idx2; + ak[idx1] = a[idx3] * bk1[idx1] - a[idx4] * bk1[idx2]; + ak[idx2] = a[idx3] * bk1[idx2] + a[idx4] * bk1[idx1]; + } + + ak[n] = a[offa + 1] * bk1[n]; + ak[n + 1] = a[offa + 1] * bk1[n + 1]; + + for (int i = n / 2 + 1; i < n; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + int idx3 = offa + 2 * n - idx1; + int idx4 = idx3 + 1; + ak[idx1] = a[idx3] * bk1[idx1] + a[idx4] * bk1[idx2]; + ak[idx2] = a[idx3] * bk1[idx2] - a[idx4] * bk1[idx1]; + } + + } else { + ak[0] = a[offa] * bk1[0]; + ak[1] = a[offa] * bk1[1]; + + for (int i = 1; i < (n - 1) / 2; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + int idx3 = offa + idx1; + int idx4 = offa + idx2; + ak[idx1] = a[idx3] * bk1[idx1] - a[idx4] * bk1[idx2]; + ak[idx2] = a[idx3] * bk1[idx2] + a[idx4] * bk1[idx1]; + } + + ak[n - 1] = a[offa + n - 1] * bk1[n - 1] - a[offa + 1] * bk1[n]; + ak[n] = a[offa + n - 1] * bk1[n] + a[offa + 1] * bk1[n - 1]; + + ak[n + 1] = a[offa + n - 1] * bk1[n + 1] + a[offa + 1] * bk1[n + 2]; + ak[n + 2] = a[offa + n - 1] * bk1[n + 2] - a[offa + 1] * bk1[n + 1]; + + for (int i = (n - 1) / 2 + 2; i < n; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + int idx3 = offa + 2 * n - idx1; + int idx4 = idx3 + 1; + ak[idx1] = a[idx3] * bk1[idx1] + a[idx4] * bk1[idx2]; + ak[idx2] = a[idx3] * bk1[idx2] - a[idx4] * bk1[idx1]; + } + } + + cftbsub(2 * nBluestein, ak, 0, ip, nw, w); + + int nthreads = ConcurrencyUtils.getNumberOfThreads(); + if ((nthreads > 1) && (n > ConcurrencyUtils.getThreadsBeginN_1D_FFT_2Threads())) { + nthreads = 2; + if ((nthreads >= 4) && (n > 
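/*
 * The final loops of bluestein_real_forward write the packed ("half-complex") layout
 * used throughout this port, matching upstream JTransforms: for even n,
 * a[0] = Re X[0], a[1] = Re X[n/2], and a[2k], a[2k+1] = Re X[k], Im X[k] for
 * 0 < k < n/2; for odd n, a[1] and a[n-1] carry the imaginary and real parts of the
 * last bin X[(n-1)/2]. Only the non-redundant half of the Hermitian spectrum of a real
 * signal is stored, which is why the loops stop at n/2 (or (n-1)/2).
 */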
ConcurrencyUtils.getThreadsBeginN_1D_FFT_4Threads())) { + nthreads = 4; + } + Future[] futures = new Future[nthreads]; + int k = nBluestein / nthreads; + for (int i = 0; i < nthreads; i++) { + final int firstIdx = i * k; + final int lastIdx = (i == (nthreads - 1)) ? nBluestein : firstIdx + k; + futures[i] = ConcurrencyUtils.submit(new Runnable() { + public void run() { + for (int i = firstIdx; i < lastIdx; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + float im = -ak[idx1] * bk2[idx2] + ak[idx2] * bk2[idx1]; + ak[idx1] = ak[idx1] * bk2[idx1] + ak[idx2] * bk2[idx2]; + ak[idx2] = im; + } + } + }); + } + ConcurrencyUtils.waitForCompletion(futures); + + cftfsub(2 * nBluestein, ak, 0, ip, nw, w); + + k = n / nthreads; + for (int i = 0; i < nthreads; i++) { + final int firstIdx = i * k; + final int lastIdx = (i == (nthreads - 1)) ? n : firstIdx + k; + futures[i] = ConcurrencyUtils.submit(new Runnable() { + public void run() { + for (int i = firstIdx; i < lastIdx; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + a[offa + i] = bk1[idx1] * ak[idx1] - bk1[idx2] * ak[idx2]; + } + } + }); + } + ConcurrencyUtils.waitForCompletion(futures); + + } else { + + for (int i = 0; i < nBluestein; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + float im = -ak[idx1] * bk2[idx2] + ak[idx2] * bk2[idx1]; + ak[idx1] = ak[idx1] * bk2[idx1] + ak[idx2] * bk2[idx2]; + ak[idx2] = im; + } + + cftfsub(2 * nBluestein, ak, 0, ip, nw, w); + + for (int i = 0; i < n; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + a[offa + i] = bk1[idx1] * ak[idx1] - bk1[idx2] * ak[idx2]; + } + } + } + + private void bluestein_real_inverse2(final float[] a, final int offa) { + final float[] ak = new float[2 * nBluestein]; + int nthreads = ConcurrencyUtils.getNumberOfThreads(); + if ((nthreads > 1) && (n > ConcurrencyUtils.getThreadsBeginN_1D_FFT_2Threads())) { + nthreads = 2; + if ((nthreads >= 4) && (n > ConcurrencyUtils.getThreadsBeginN_1D_FFT_4Threads())) { + nthreads = 4; + } + Future[] futures = new Future[nthreads]; + int k = n / nthreads; + for (int i = 0; i < nthreads; i++) { + final int firstIdx = i * k; + final int lastIdx = (i == (nthreads - 1)) ? n : firstIdx + k; + futures[i] = ConcurrencyUtils.submit(new Runnable() { + public void run() { + for (int i = firstIdx; i < lastIdx; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + int idx3 = offa + i; + ak[idx1] = a[idx3] * bk1[idx1]; + ak[idx2] = a[idx3] * bk1[idx2]; + } + } + }); + } + ConcurrencyUtils.waitForCompletion(futures); + + cftbsub(2 * nBluestein, ak, 0, ip, nw, w); + + k = nBluestein / nthreads; + for (int i = 0; i < nthreads; i++) { + final int firstIdx = i * k; + final int lastIdx = (i == (nthreads - 1)) ? 
nBluestein : firstIdx + k; + futures[i] = ConcurrencyUtils.submit(new Runnable() { + public void run() { + for (int i = firstIdx; i < lastIdx; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + float im = -ak[idx1] * bk2[idx2] + ak[idx2] * bk2[idx1]; + ak[idx1] = ak[idx1] * bk2[idx1] + ak[idx2] * bk2[idx2]; + ak[idx2] = im; + } + } + }); + } + ConcurrencyUtils.waitForCompletion(futures); + + } else { + + for (int i = 0; i < n; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + int idx3 = offa + i; + ak[idx1] = a[idx3] * bk1[idx1]; + ak[idx2] = a[idx3] * bk1[idx2]; + } + + cftbsub(2 * nBluestein, ak, 0, ip, nw, w); + + for (int i = 0; i < nBluestein; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + float im = -ak[idx1] * bk2[idx2] + ak[idx2] * bk2[idx1]; + ak[idx1] = ak[idx1] * bk2[idx1] + ak[idx2] * bk2[idx2]; + ak[idx2] = im; + } + } + + cftfsub(2 * nBluestein, ak, 0, ip, nw, w); + + if (n % 2 == 0) { + a[offa] = bk1[0] * ak[0] - bk1[1] * ak[1]; + a[offa + 1] = bk1[n] * ak[n] - bk1[n + 1] * ak[n + 1]; + for (int i = 1; i < n / 2; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + a[offa + idx1] = bk1[idx1] * ak[idx1] - bk1[idx2] * ak[idx2]; + a[offa + idx2] = bk1[idx2] * ak[idx1] + bk1[idx1] * ak[idx2]; + } + } else { + a[offa] = bk1[0] * ak[0] - bk1[1] * ak[1]; + a[offa + 1] = bk1[n] * ak[n - 1] + bk1[n - 1] * ak[n]; + for (int i = 1; i < (n - 1) / 2; i++) { + int idx1 = 2 * i; + int idx2 = idx1 + 1; + a[offa + idx1] = bk1[idx1] * ak[idx1] - bk1[idx2] * ak[idx2]; + a[offa + idx2] = bk1[idx2] * ak[idx1] + bk1[idx1] * ak[idx2]; + } + a[offa + n - 1] = bk1[n - 1] * ak[n - 1] - bk1[n] * ak[n]; + } + } + + /*--------------------------------------------------------- + rfftf1: further processing of Real forward FFT + --------------------------------------------------------*/ + void rfftf(final float[] a, final int offa) { + if (n == 1) + return; + int l1, l2, na, kh, nf, ip, iw, ido, idl1; + + final float[] ch = new float[n]; + final int twon = 2 * n; + nf = (int) wtable_r[1 + twon]; + na = 1; + l2 = n; + iw = twon - 1; + for (int k1 = 1; k1 <= nf; ++k1) { + kh = nf - k1; + ip = (int) wtable_r[kh + 2 + twon]; + l1 = l2 / ip; + ido = n / l2; + idl1 = ido * l1; + iw -= (ip - 1) * ido; + na = 1 - na; + switch (ip) { + case 2: + if (na == 0) { + radf2(ido, l1, a, offa, ch, 0, iw); + } else { + radf2(ido, l1, ch, 0, a, offa, iw); + } + break; + case 3: + if (na == 0) { + radf3(ido, l1, a, offa, ch, 0, iw); + } else { + radf3(ido, l1, ch, 0, a, offa, iw); + } + break; + case 4: + if (na == 0) { + radf4(ido, l1, a, offa, ch, 0, iw); + } else { + radf4(ido, l1, ch, 0, a, offa, iw); + } + break; + case 5: + if (na == 0) { + radf5(ido, l1, a, offa, ch, 0, iw); + } else { + radf5(ido, l1, ch, 0, a, offa, iw); + } + break; + default: + if (ido == 1) + na = 1 - na; + if (na == 0) { + radfg(ido, ip, l1, idl1, a, offa, ch, 0, iw); + na = 1; + } else { + radfg(ido, ip, l1, idl1, ch, 0, a, offa, iw); + na = 0; + } + break; + } + l2 = l1; + } + if (na == 1) + return; + System.arraycopy(ch, 0, a, offa, n); + } + + /*--------------------------------------------------------- + rfftb1: further processing of Real backward FFT + --------------------------------------------------------*/ + void rfftb(final float[] a, final int offa) { + if (n == 1) + return; + int l1, l2, na, nf, ip, iw, ido, idl1; + + float[] ch = new float[n]; + final int twon = 2 * n; + nf = (int) wtable_r[1 + twon]; + na = 0; + l1 = 1; + iw = n; + for (int k1 = 1; k1 <= nf; k1++) { + ip = (int) wtable_r[k1 + 1 + twon]; + l2 = ip * l1; + ido = 
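/*
 * rfftf/rfftb are the FFTPACK-style mixed-radix drivers for the real transform.
 * wtable_r holds the twiddle factors in its first 2n entries and, from offset 2n on,
 * the factorisation bookkeeping (the number of factors of n followed by the factors
 * themselves: 4, 2, 3, 5 and any remaining general factor). Each pass consumes one
 * factor and ping-pongs between the caller's array a and the scratch buffer ch (the
 * 'na' toggle), so a copy back is needed only when the last pass ended in ch. rfftf
 * walks the factors in reverse order and rfftb in forward order, which keeps the
 * intermediate orderings consistent with the radfN/radbN kernels below.
 */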
n / l2; + idl1 = ido * l1; + switch (ip) { + case 2: + if (na == 0) { + radb2(ido, l1, a, offa, ch, 0, iw); + } else { + radb2(ido, l1, ch, 0, a, offa, iw); + } + na = 1 - na; + break; + case 3: + if (na == 0) { + radb3(ido, l1, a, offa, ch, 0, iw); + } else { + radb3(ido, l1, ch, 0, a, offa, iw); + } + na = 1 - na; + break; + case 4: + if (na == 0) { + radb4(ido, l1, a, offa, ch, 0, iw); + } else { + radb4(ido, l1, ch, 0, a, offa, iw); + } + na = 1 - na; + break; + case 5: + if (na == 0) { + radb5(ido, l1, a, offa, ch, 0, iw); + } else { + radb5(ido, l1, ch, 0, a, offa, iw); + } + na = 1 - na; + break; + default: + if (na == 0) { + radbg(ido, ip, l1, idl1, a, offa, ch, 0, iw); + } else { + radbg(ido, ip, l1, idl1, ch, 0, a, offa, iw); + } + if (ido == 1) + na = 1 - na; + break; + } + l1 = l2; + iw += (ip - 1) * ido; + } + if (na == 0) + return; + System.arraycopy(ch, 0, a, offa, n); + } + + /*------------------------------------------------- + radf2: Real FFT's forward processing of factor 2 + -------------------------------------------------*/ + void radf2(final int ido, final int l1, final float[] in, final int in_off, final float[] out, final int out_off, final int offset) { + int i, ic, idx0, idx1, idx2, idx3, idx4; + float t1i, t1r, w1r, w1i; + int iw1; + iw1 = offset; + idx0 = l1 * ido; + idx1 = 2 * ido; + for (int k = 0; k < l1; k++) { + int oidx1 = out_off + k * idx1; + int oidx2 = oidx1 + idx1 - 1; + int iidx1 = in_off + k * ido; + int iidx2 = iidx1 + idx0; + + float i1r = in[iidx1]; + float i2r = in[iidx2]; + + out[oidx1] = i1r + i2r; + out[oidx2] = i1r - i2r; + } + if (ido < 2) + return; + if (ido != 2) { + for (int k = 0; k < l1; k++) { + idx1 = k * ido; + idx2 = 2 * idx1; + idx3 = idx2 + ido; + idx4 = idx1 + idx0; + for (i = 2; i < ido; i += 2) { + ic = ido - i; + int widx1 = i - 1 + iw1; + int oidx1 = out_off + i + idx2; + int oidx2 = out_off + ic + idx3; + int iidx1 = in_off + i + idx1; + int iidx2 = in_off + i + idx4; + + float a1i = in[iidx1 - 1]; + float a1r = in[iidx1]; + float a2i = in[iidx2 - 1]; + float a2r = in[iidx2]; + + w1r = wtable_r[widx1 - 1]; + w1i = wtable_r[widx1]; + + t1r = w1r * a2i + w1i * a2r; + t1i = w1r * a2r - w1i * a2i; + + out[oidx1] = a1r + t1i; + out[oidx1 - 1] = a1i + t1r; + + out[oidx2] = t1i - a1r; + out[oidx2 - 1] = a1i - t1r; + } + } + if (ido % 2 == 1) + return; + } + idx2 = 2 * idx1; + for (int k = 0; k < l1; k++) { + idx1 = k * ido; + int oidx1 = out_off + idx2 + ido; + int iidx1 = in_off + ido - 1 + idx1; + + out[oidx1] = -in[iidx1 + idx0]; + out[oidx1 - 1] = in[iidx1]; + } + } + + /*------------------------------------------------- + radb2: Real FFT's backward processing of factor 2 + -------------------------------------------------*/ + void radb2(final int ido, final int l1, final float[] in, final int in_off, final float[] out, final int out_off, final int offset) { + int i, ic; + float t1i, t1r, w1r, w1i; + int iw1 = offset; + + int idx0 = l1 * ido; + for (int k = 0; k < l1; k++) { + int idx1 = k * ido; + int idx2 = 2 * idx1; + int idx3 = idx2 + ido; + int oidx1 = out_off + idx1; + int iidx1 = in_off + idx2; + int iidx2 = in_off + ido - 1 + idx3; + float i1r = in[iidx1]; + float i2r = in[iidx2]; + out[oidx1] = i1r + i2r; + out[oidx1 + idx0] = i1r - i2r; + } + if (ido < 2) + return; + if (ido != 2) { + for (int k = 0; k < l1; ++k) { + int idx1 = k * ido; + int idx2 = 2 * idx1; + int idx3 = idx2 + ido; + int idx4 = idx1 + idx0; + for (i = 2; i < ido; i += 2) { + ic = ido - i; + int idx5 = i - 1 + iw1; + int idx6 = out_off + i; + int 
idx7 = in_off + i; + int idx8 = in_off + ic; + w1r = wtable_r[idx5 - 1]; + w1i = wtable_r[idx5]; + int iidx1 = idx7 + idx2; + int iidx2 = idx8 + idx3; + int oidx1 = idx6 + idx1; + int oidx2 = idx6 + idx4; + t1r = in[iidx1 - 1] - in[iidx2 - 1]; + t1i = in[iidx1] + in[iidx2]; + float i1i = in[iidx1]; + float i1r = in[iidx1 - 1]; + float i2i = in[iidx2]; + float i2r = in[iidx2 - 1]; + + out[oidx1 - 1] = i1r + i2r; + out[oidx1] = i1i - i2i; + out[oidx2 - 1] = w1r * t1r - w1i * t1i; + out[oidx2] = w1r * t1i + w1i * t1r; + } + } + if (ido % 2 == 1) + return; + } + for (int k = 0; k < l1; k++) { + int idx1 = k * ido; + int idx2 = 2 * idx1; + int oidx1 = out_off + ido - 1 + idx1; + int iidx1 = in_off + idx2 + ido; + out[oidx1] = 2 * in[iidx1 - 1]; + out[oidx1 + idx0] = -2 * in[iidx1]; + } + } + + /*------------------------------------------------- + radf3: Real FFT's forward processing of factor 3 + -------------------------------------------------*/ + void radf3(final int ido, final int l1, final float[] in, final int in_off, final float[] out, final int out_off, final int offset) { + final float taur = -0.5f; + final float taui = 0.866025403784438707610604524234076962f; + int i, ic; + float ci2, di2, di3, cr2, dr2, dr3, ti2, ti3, tr2, tr3, w1r, w2r, w1i, w2i; + int iw1, iw2; + iw1 = offset; + iw2 = iw1 + ido; + + int idx0 = l1 * ido; + for (int k = 0; k < l1; k++) { + int idx1 = k * ido; + int idx3 = 2 * idx0; + int idx4 = (3 * k + 1) * ido; + int iidx1 = in_off + idx1; + int iidx2 = iidx1 + idx0; + int iidx3 = iidx1 + idx3; + float i1r = in[iidx1]; + float i2r = in[iidx2]; + float i3r = in[iidx3]; + cr2 = i2r + i3r; + out[out_off + 3 * idx1] = i1r + cr2; + out[out_off + idx4 + ido] = taui * (i3r - i2r); + out[out_off + ido - 1 + idx4] = i1r + taur * cr2; + } + if (ido == 1) + return; + for (int k = 0; k < l1; k++) { + int idx3 = k * ido; + int idx4 = 3 * idx3; + int idx5 = idx3 + idx0; + int idx6 = idx5 + idx0; + int idx7 = idx4 + ido; + int idx8 = idx7 + ido; + for (i = 2; i < ido; i += 2) { + ic = ido - i; + int widx1 = i - 1 + iw1; + int widx2 = i - 1 + iw2; + + w1r = wtable_r[widx1 - 1]; + w1i = wtable_r[widx1]; + w2r = wtable_r[widx2 - 1]; + w2i = wtable_r[widx2]; + + int idx9 = in_off + i; + int idx10 = out_off + i; + int idx11 = out_off + ic; + int iidx1 = idx9 + idx3; + int iidx2 = idx9 + idx5; + int iidx3 = idx9 + idx6; + + float i1i = in[iidx1 - 1]; + float i1r = in[iidx1]; + float i2i = in[iidx2 - 1]; + float i2r = in[iidx2]; + float i3i = in[iidx3 - 1]; + float i3r = in[iidx3]; + + dr2 = w1r * i2i + w1i * i2r; + di2 = w1r * i2r - w1i * i2i; + dr3 = w2r * i3i + w2i * i3r; + di3 = w2r * i3r - w2i * i3i; + cr2 = dr2 + dr3; + ci2 = di2 + di3; + tr2 = i1i + taur * cr2; + ti2 = i1r + taur * ci2; + tr3 = taui * (di2 - di3); + ti3 = taui * (dr3 - dr2); + + int oidx1 = idx10 + idx4; + int oidx2 = idx11 + idx7; + int oidx3 = idx10 + idx8; + + out[oidx1 - 1] = i1i + cr2; + out[oidx1] = i1r + ci2; + out[oidx2 - 1] = tr2 - tr3; + out[oidx2] = ti3 - ti2; + out[oidx3 - 1] = tr2 + tr3; + out[oidx3] = ti2 + ti3; + } + } + } + + /*------------------------------------------------- + radb3: Real FFT's backward processing of factor 3 + -------------------------------------------------*/ + void radb3(final int ido, final int l1, final float[] in, final int in_off, final float[] out, final int out_off, final int offset) { + final float taur = -0.5f; + final float taui = 0.866025403784438707610604524234076962f; + int i, ic; + float ci2, ci3, di2, di3, cr2, cr3, dr2, dr3, ti2, tr2, w1r, w2r, w1i, w2i; + int 
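/*
 * radf2/radb2 are the radix-2 real butterflies: each column forms x0 + x1 and x0 - x1,
 * the interior columns are rotated by the wtable_r twiddles, and the Nyquist column is
 * handled separately (the ido % 2 branches). radf3/radb3 are the radix-3 step; the
 * constants are taur = cos(2*pi/3) = -1/2 and taui = sin(2*pi/3) = sqrt(3)/2, so a
 * 3-point sub-DFT is combined as, e.g.,
 *   X1 = x0 + taur*(x1 + x2) + i*taui*(x2 - x1)
 * with the sign conventions of the surrounding code.
 */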
iw1, iw2; + iw1 = offset; + iw2 = iw1 + ido; + + for (int k = 0; k < l1; k++) { + int idx1 = k * ido; + int iidx1 = in_off + 3 * idx1; + int iidx2 = iidx1 + 2 * ido; + float i1i = in[iidx1]; + + tr2 = 2 * in[iidx2 - 1]; + cr2 = i1i + taur * tr2; + ci3 = 2 * taui * in[iidx2]; + + out[out_off + idx1] = i1i + tr2; + out[out_off + (k + l1) * ido] = cr2 - ci3; + out[out_off + (k + 2 * l1) * ido] = cr2 + ci3; + } + if (ido == 1) + return; + int idx0 = l1 * ido; + for (int k = 0; k < l1; k++) { + int idx1 = k * ido; + int idx2 = 3 * idx1; + int idx3 = idx2 + ido; + int idx4 = idx3 + ido; + int idx5 = idx1 + idx0; + int idx6 = idx5 + idx0; + for (i = 2; i < ido; i += 2) { + ic = ido - i; + int idx7 = in_off + i; + int idx8 = in_off + ic; + int idx9 = out_off + i; + int iidx1 = idx7 + idx2; + int iidx2 = idx7 + idx4; + int iidx3 = idx8 + idx3; + + float i1i = in[iidx1 - 1]; + float i1r = in[iidx1]; + float i2i = in[iidx2 - 1]; + float i2r = in[iidx2]; + float i3i = in[iidx3 - 1]; + float i3r = in[iidx3]; + + tr2 = i2i + i3i; + cr2 = i1i + taur * tr2; + ti2 = i2r - i3r; + ci2 = i1r + taur * ti2; + cr3 = taui * (i2i - i3i); + ci3 = taui * (i2r + i3r); + dr2 = cr2 - ci3; + dr3 = cr2 + ci3; + di2 = ci2 + cr3; + di3 = ci2 - cr3; + + int widx1 = i - 1 + iw1; + int widx2 = i - 1 + iw2; + + w1r = wtable_r[widx1 - 1]; + w1i = wtable_r[widx1]; + w2r = wtable_r[widx2 - 1]; + w2i = wtable_r[widx2]; + + int oidx1 = idx9 + idx1; + int oidx2 = idx9 + idx5; + int oidx3 = idx9 + idx6; + + out[oidx1 - 1] = i1i + tr2; + out[oidx1] = i1r + ti2; + out[oidx2 - 1] = w1r * dr2 - w1i * di2; + out[oidx2] = w1r * di2 + w1i * dr2; + out[oidx3 - 1] = w2r * dr3 - w2i * di3; + out[oidx3] = w2r * di3 + w2i * dr3; + } + } + } + + /*------------------------------------------------- + radf4: Real FFT's forward processing of factor 4 + -------------------------------------------------*/ + void radf4(final int ido, final int l1, final float[] in, final int in_off, final float[] out, final int out_off, final int offset) { + final float hsqt2 = 0.707106781186547572737310929369414225f; + int i, ic; + float ci2, ci3, ci4, cr2, cr3, cr4, ti1, ti2, ti3, ti4, tr1, tr2, tr3, tr4, w1r, w1i, w2r, w2i, w3r, w3i; + int iw1, iw2, iw3; + iw1 = offset; + iw2 = offset + ido; + iw3 = iw2 + ido; + int idx0 = l1 * ido; + for (int k = 0; k < l1; k++) { + int idx1 = k * ido; + int idx2 = 4 * idx1; + int idx3 = idx1 + idx0; + int idx4 = idx3 + idx0; + int idx5 = idx4 + idx0; + int idx6 = idx2 + ido; + float i1r = in[in_off + idx1]; + float i2r = in[in_off + idx3]; + float i3r = in[in_off + idx4]; + float i4r = in[in_off + idx5]; + + tr1 = i2r + i4r; + tr2 = i1r + i3r; + + int oidx1 = out_off + idx2; + int oidx2 = out_off + idx6 + ido; + + out[oidx1] = tr1 + tr2; + out[oidx2 - 1 + ido + ido] = tr2 - tr1; + out[oidx2 - 1] = i1r - i3r; + out[oidx2] = i4r - i2r; + } + if (ido < 2) + return; + if (ido != 2) { + for (int k = 0; k < l1; k++) { + int idx1 = k * ido; + int idx2 = idx1 + idx0; + int idx3 = idx2 + idx0; + int idx4 = idx3 + idx0; + int idx5 = 4 * idx1; + int idx6 = idx5 + ido; + int idx7 = idx6 + ido; + int idx8 = idx7 + ido; + for (i = 2; i < ido; i += 2) { + ic = ido - i; + int widx1 = i - 1 + iw1; + int widx2 = i - 1 + iw2; + int widx3 = i - 1 + iw3; + w1r = wtable_r[widx1 - 1]; + w1i = wtable_r[widx1]; + w2r = wtable_r[widx2 - 1]; + w2i = wtable_r[widx2]; + w3r = wtable_r[widx3 - 1]; + w3i = wtable_r[widx3]; + + int idx9 = in_off + i; + int idx10 = out_off + i; + int idx11 = out_off + ic; + int iidx1 = idx9 + idx1; + int iidx2 = idx9 + idx2; + int 
iidx3 = idx9 + idx3; + int iidx4 = idx9 + idx4; + + float i1i = in[iidx1 - 1]; + float i1r = in[iidx1]; + float i2i = in[iidx2 - 1]; + float i2r = in[iidx2]; + float i3i = in[iidx3 - 1]; + float i3r = in[iidx3]; + float i4i = in[iidx4 - 1]; + float i4r = in[iidx4]; + + cr2 = w1r * i2i + w1i * i2r; + ci2 = w1r * i2r - w1i * i2i; + cr3 = w2r * i3i + w2i * i3r; + ci3 = w2r * i3r - w2i * i3i; + cr4 = w3r * i4i + w3i * i4r; + ci4 = w3r * i4r - w3i * i4i; + tr1 = cr2 + cr4; + tr4 = cr4 - cr2; + ti1 = ci2 + ci4; + ti4 = ci2 - ci4; + ti2 = i1r + ci3; + ti3 = i1r - ci3; + tr2 = i1i + cr3; + tr3 = i1i - cr3; + + int oidx1 = idx10 + idx5; + int oidx2 = idx11 + idx6; + int oidx3 = idx10 + idx7; + int oidx4 = idx11 + idx8; + + out[oidx1 - 1] = tr1 + tr2; + out[oidx4 - 1] = tr2 - tr1; + out[oidx1] = ti1 + ti2; + out[oidx4] = ti1 - ti2; + out[oidx3 - 1] = ti4 + tr3; + out[oidx2 - 1] = tr3 - ti4; + out[oidx3] = tr4 + ti3; + out[oidx2] = tr4 - ti3; + } + } + if (ido % 2 == 1) + return; + } + for (int k = 0; k < l1; k++) { + int idx1 = k * ido; + int idx2 = 4 * idx1; + int idx3 = idx1 + idx0; + int idx4 = idx3 + idx0; + int idx5 = idx4 + idx0; + int idx6 = idx2 + ido; + int idx7 = idx6 + ido; + int idx8 = idx7 + ido; + int idx9 = in_off + ido; + int idx10 = out_off + ido; + + float i1i = in[idx9 - 1 + idx1]; + float i2i = in[idx9 - 1 + idx3]; + float i3i = in[idx9 - 1 + idx4]; + float i4i = in[idx9 - 1 + idx5]; + + ti1 = -hsqt2 * (i2i + i4i); + tr1 = hsqt2 * (i2i - i4i); + + out[idx10 - 1 + idx2] = tr1 + i1i; + out[idx10 - 1 + idx7] = i1i - tr1; + out[out_off + idx6] = ti1 - i3i; + out[out_off + idx8] = ti1 + i3i; + } + } + + /*------------------------------------------------- + radb4: Real FFT's backward processing of factor 4 + -------------------------------------------------*/ + void radb4(final int ido, final int l1, final float[] in, final int in_off, final float[] out, final int out_off, final int offset) { + final float sqrt2 = 1.41421356237309514547462185873882845f; + int i, ic; + float ci2, ci3, ci4, cr2, cr3, cr4; + float ti1, ti2, ti3, ti4, tr1, tr2, tr3, tr4, w1r, w1i, w2r, w2i, w3r, w3i; + int iw1, iw2, iw3; + iw1 = offset; + iw2 = iw1 + ido; + iw3 = iw2 + ido; + + int idx0 = l1 * ido; + for (int k = 0; k < l1; k++) { + int idx1 = k * ido; + int idx2 = 4 * idx1; + int idx3 = idx1 + idx0; + int idx4 = idx3 + idx0; + int idx5 = idx4 + idx0; + int idx6 = idx2 + ido; + int idx7 = idx6 + ido; + int idx8 = idx7 + ido; + + float i1r = in[in_off + idx2]; + float i2r = in[in_off + idx7]; + float i3r = in[in_off + ido - 1 + idx8]; + float i4r = in[in_off + ido - 1 + idx6]; + + tr1 = i1r - i3r; + tr2 = i1r + i3r; + tr3 = i4r + i4r; + tr4 = i2r + i2r; + + out[out_off + idx1] = tr2 + tr3; + out[out_off + idx3] = tr1 - tr4; + out[out_off + idx4] = tr2 - tr3; + out[out_off + idx5] = tr1 + tr4; + } + if (ido < 2) + return; + if (ido != 2) { + for (int k = 0; k < l1; ++k) { + int idx1 = k * ido; + int idx2 = idx1 + idx0; + int idx3 = idx2 + idx0; + int idx4 = idx3 + idx0; + int idx5 = 4 * idx1; + int idx6 = idx5 + ido; + int idx7 = idx6 + ido; + int idx8 = idx7 + ido; + for (i = 2; i < ido; i += 2) { + ic = ido - i; + int widx1 = i - 1 + iw1; + int widx2 = i - 1 + iw2; + int widx3 = i - 1 + iw3; + w1r = wtable_r[widx1 - 1]; + w1i = wtable_r[widx1]; + w2r = wtable_r[widx2 - 1]; + w2i = wtable_r[widx2]; + w3r = wtable_r[widx3 - 1]; + w3i = wtable_r[widx3]; + + int idx12 = in_off + i; + int idx13 = in_off + ic; + int idx14 = out_off + i; + + int iidx1 = idx12 + idx5; + int iidx2 = idx13 + idx6; + int iidx3 = 
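/*
 * radf4/radb4: the radix-4 step. The first loop handles the purely real column (four
 * inputs combined into the tr1/tr2 sums and differences), the middle loop applies
 * three twiddle rotations per column, and the tail handles the half-sample column
 * using hsqt2 = sqrt(2)/2 = cos(pi/4); the backward kernel uses sqrt2 = sqrt(2) for
 * the matching combination on the way back.
 */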
idx12 + idx7; + int iidx4 = idx13 + idx8; + + float i1i = in[iidx1 - 1]; + float i1r = in[iidx1]; + float i2i = in[iidx2 - 1]; + float i2r = in[iidx2]; + float i3i = in[iidx3 - 1]; + float i3r = in[iidx3]; + float i4i = in[iidx4 - 1]; + float i4r = in[iidx4]; + + ti1 = i1r + i4r; + ti2 = i1r - i4r; + ti3 = i3r - i2r; + tr4 = i3r + i2r; + tr1 = i1i - i4i; + tr2 = i1i + i4i; + ti4 = i3i - i2i; + tr3 = i3i + i2i; + cr3 = tr2 - tr3; + ci3 = ti2 - ti3; + cr2 = tr1 - tr4; + cr4 = tr1 + tr4; + ci2 = ti1 + ti4; + ci4 = ti1 - ti4; + + int oidx1 = idx14 + idx1; + int oidx2 = idx14 + idx2; + int oidx3 = idx14 + idx3; + int oidx4 = idx14 + idx4; + + out[oidx1 - 1] = tr2 + tr3; + out[oidx1] = ti2 + ti3; + out[oidx2 - 1] = w1r * cr2 - w1i * ci2; + out[oidx2] = w1r * ci2 + w1i * cr2; + out[oidx3 - 1] = w2r * cr3 - w2i * ci3; + out[oidx3] = w2r * ci3 + w2i * cr3; + out[oidx4 - 1] = w3r * cr4 - w3i * ci4; + out[oidx4] = w3r * ci4 + w3i * cr4; + } + } + if (ido % 2 == 1) + return; + } + for (int k = 0; k < l1; k++) { + int idx1 = k * ido; + int idx2 = 4 * idx1; + int idx3 = idx1 + idx0; + int idx4 = idx3 + idx0; + int idx5 = idx4 + idx0; + int idx6 = idx2 + ido; + int idx7 = idx6 + ido; + int idx8 = idx7 + ido; + int idx9 = in_off + ido; + int idx10 = out_off + ido; + + float i1r = in[idx9 - 1 + idx2]; + float i2r = in[idx9 - 1 + idx7]; + float i3r = in[in_off + idx6]; + float i4r = in[in_off + idx8]; + + ti1 = i3r + i4r; + ti2 = i4r - i3r; + tr1 = i1r - i2r; + tr2 = i1r + i2r; + + out[idx10 - 1 + idx1] = tr2 + tr2; + out[idx10 - 1 + idx3] = sqrt2 * (tr1 - ti1); + out[idx10 - 1 + idx4] = ti2 + ti2; + out[idx10 - 1 + idx5] = -sqrt2 * (tr1 + ti1); + } + } + + /*------------------------------------------------- + radf5: Real FFT's forward processing of factor 5 + -------------------------------------------------*/ + void radf5(final int ido, final int l1, final float[] in, final int in_off, final float[] out, final int out_off, final int offset) { + final float tr11 = 0.309016994374947451262869435595348477f; + final float ti11 = 0.951056516295153531181938433292089030f; + final float tr12 = -0.809016994374947340240566973079694435f; + final float ti12 = 0.587785252292473248125759255344746634f; + int i, ic; + float ci2, di2, ci4, ci5, di3, di4, di5, ci3, cr2, cr3, dr2, dr3, dr4, dr5, cr5, cr4, ti2, ti3, ti5, ti4, tr2, tr3, tr4, tr5, w1r, w1i, w2r, w2i, w3r, w3i, w4r, w4i; + int iw1, iw2, iw3, iw4; + iw1 = offset; + iw2 = iw1 + ido; + iw3 = iw2 + ido; + iw4 = iw3 + ido; + + int idx0 = l1 * ido; + for (int k = 0; k < l1; k++) { + int idx1 = k * ido; + int idx2 = 5 * idx1; + int idx3 = idx2 + ido; + int idx4 = idx3 + ido; + int idx5 = idx4 + ido; + int idx6 = idx5 + ido; + int idx7 = idx1 + idx0; + int idx8 = idx7 + idx0; + int idx9 = idx8 + idx0; + int idx10 = idx9 + idx0; + int idx11 = out_off + ido - 1; + + float i1r = in[in_off + idx1]; + float i2r = in[in_off + idx7]; + float i3r = in[in_off + idx8]; + float i4r = in[in_off + idx9]; + float i5r = in[in_off + idx10]; + + cr2 = i5r + i2r; + ci5 = i5r - i2r; + cr3 = i4r + i3r; + ci4 = i4r - i3r; + + out[out_off + idx2] = i1r + cr2 + cr3; + out[idx11 + idx3] = i1r + tr11 * cr2 + tr12 * cr3; + out[out_off + idx4] = ti11 * ci5 + ti12 * ci4; + out[idx11 + idx5] = i1r + tr12 * cr2 + tr11 * cr3; + out[out_off + idx6] = ti12 * ci5 - ti11 * ci4; + } + if (ido == 1) + return; + for (int k = 0; k < l1; ++k) { + int idx1 = k * ido; + int idx2 = 5 * idx1; + int idx3 = idx2 + ido; + int idx4 = idx3 + ido; + int idx5 = idx4 + ido; + int idx6 = idx5 + ido; + int idx7 = idx1 + 
idx0; + int idx8 = idx7 + idx0; + int idx9 = idx8 + idx0; + int idx10 = idx9 + idx0; + for (i = 2; i < ido; i += 2) { + int widx1 = i - 1 + iw1; + int widx2 = i - 1 + iw2; + int widx3 = i - 1 + iw3; + int widx4 = i - 1 + iw4; + w1r = wtable_r[widx1 - 1]; + w1i = wtable_r[widx1]; + w2r = wtable_r[widx2 - 1]; + w2i = wtable_r[widx2]; + w3r = wtable_r[widx3 - 1]; + w3i = wtable_r[widx3]; + w4r = wtable_r[widx4 - 1]; + w4i = wtable_r[widx4]; + + ic = ido - i; + int idx15 = in_off + i; + int idx16 = out_off + i; + int idx17 = out_off + ic; + + int iidx1 = idx15 + idx1; + int iidx2 = idx15 + idx7; + int iidx3 = idx15 + idx8; + int iidx4 = idx15 + idx9; + int iidx5 = idx15 + idx10; + + float i1i = in[iidx1 - 1]; + float i1r = in[iidx1]; + float i2i = in[iidx2 - 1]; + float i2r = in[iidx2]; + float i3i = in[iidx3 - 1]; + float i3r = in[iidx3]; + float i4i = in[iidx4 - 1]; + float i4r = in[iidx4]; + float i5i = in[iidx5 - 1]; + float i5r = in[iidx5]; + + dr2 = w1r * i2i + w1i * i2r; + di2 = w1r * i2r - w1i * i2i; + dr3 = w2r * i3i + w2i * i3r; + di3 = w2r * i3r - w2i * i3i; + dr4 = w3r * i4i + w3i * i4r; + di4 = w3r * i4r - w3i * i4i; + dr5 = w4r * i5i + w4i * i5r; + di5 = w4r * i5r - w4i * i5i; + + cr2 = dr2 + dr5; + ci5 = dr5 - dr2; + cr5 = di2 - di5; + ci2 = di2 + di5; + cr3 = dr3 + dr4; + ci4 = dr4 - dr3; + cr4 = di3 - di4; + ci3 = di3 + di4; + + tr2 = i1i + tr11 * cr2 + tr12 * cr3; + ti2 = i1r + tr11 * ci2 + tr12 * ci3; + tr3 = i1i + tr12 * cr2 + tr11 * cr3; + ti3 = i1r + tr12 * ci2 + tr11 * ci3; + tr5 = ti11 * cr5 + ti12 * cr4; + ti5 = ti11 * ci5 + ti12 * ci4; + tr4 = ti12 * cr5 - ti11 * cr4; + ti4 = ti12 * ci5 - ti11 * ci4; + + int oidx1 = idx16 + idx2; + int oidx2 = idx17 + idx3; + int oidx3 = idx16 + idx4; + int oidx4 = idx17 + idx5; + int oidx5 = idx16 + idx6; + + out[oidx1 - 1] = i1i + cr2 + cr3; + out[oidx1] = i1r + ci2 + ci3; + out[oidx3 - 1] = tr2 + tr5; + out[oidx2 - 1] = tr2 - tr5; + out[oidx3] = ti2 + ti5; + out[oidx2] = ti5 - ti2; + out[oidx5 - 1] = tr3 + tr4; + out[oidx4 - 1] = tr3 - tr4; + out[oidx5] = ti3 + ti4; + out[oidx4] = ti4 - ti3; + } + } + } + + /*------------------------------------------------- + radb5: Real FFT's backward processing of factor 5 + -------------------------------------------------*/ + void radb5(final int ido, final int l1, final float[] in, final int in_off, final float[] out, final int out_off, final int offset) { + final float tr11 = 0.309016994374947451262869435595348477f; + final float ti11 = 0.951056516295153531181938433292089030f; + final float tr12 = -0.809016994374947340240566973079694435f; + final float ti12 = 0.587785252292473248125759255344746634f; + int i, ic; + float ci2, ci3, ci4, ci5, di3, di4, di5, di2, cr2, cr3, cr5, cr4, ti2, ti3, ti4, ti5, dr3, dr4, dr5, dr2, tr2, tr3, tr4, tr5, w1r, w1i, w2r, w2i, w3r, w3i, w4r, w4i; + int iw1, iw2, iw3, iw4; + iw1 = offset; + iw2 = iw1 + ido; + iw3 = iw2 + ido; + iw4 = iw3 + ido; + + int idx0 = l1 * ido; + for (int k = 0; k < l1; k++) { + int idx1 = k * ido; + int idx2 = 5 * idx1; + int idx3 = idx2 + ido; + int idx4 = idx3 + ido; + int idx5 = idx4 + ido; + int idx6 = idx5 + ido; + int idx7 = idx1 + idx0; + int idx8 = idx7 + idx0; + int idx9 = idx8 + idx0; + int idx10 = idx9 + idx0; + int idx11 = in_off + ido - 1; + + float i1r = in[in_off + idx2]; + + ti5 = 2 * in[in_off + idx4]; + ti4 = 2 * in[in_off + idx6]; + tr2 = 2 * in[idx11 + idx3]; + tr3 = 2 * in[idx11 + idx5]; + cr2 = i1r + tr11 * tr2 + tr12 * tr3; + cr3 = i1r + tr12 * tr2 + tr11 * tr3; + ci5 = ti11 * ti5 + ti12 * ti4; + ci4 = ti12 * ti5 
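/*
 * radf5/radb5: the radix-5 step. The constants are the fifth roots of unity,
 * tr11 = cos(2*pi/5) ~ 0.30902, ti11 = sin(2*pi/5) ~ 0.95106,
 * tr12 = cos(4*pi/5) ~ -0.80902, ti12 = sin(4*pi/5) ~ 0.58779, so each 5-point
 * sub-DFT is evaluated directly from symmetric and antisymmetric combinations of the
 * inputs instead of a general inner loop.
 */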
- ti11 * ti4; + + out[out_off + idx1] = i1r + tr2 + tr3; + out[out_off + idx7] = cr2 - ci5; + out[out_off + idx8] = cr3 - ci4; + out[out_off + idx9] = cr3 + ci4; + out[out_off + idx10] = cr2 + ci5; + } + if (ido == 1) + return; + for (int k = 0; k < l1; ++k) { + int idx1 = k * ido; + int idx2 = 5 * idx1; + int idx3 = idx2 + ido; + int idx4 = idx3 + ido; + int idx5 = idx4 + ido; + int idx6 = idx5 + ido; + int idx7 = idx1 + idx0; + int idx8 = idx7 + idx0; + int idx9 = idx8 + idx0; + int idx10 = idx9 + idx0; + for (i = 2; i < ido; i += 2) { + ic = ido - i; + int widx1 = i - 1 + iw1; + int widx2 = i - 1 + iw2; + int widx3 = i - 1 + iw3; + int widx4 = i - 1 + iw4; + w1r = wtable_r[widx1 - 1]; + w1i = wtable_r[widx1]; + w2r = wtable_r[widx2 - 1]; + w2i = wtable_r[widx2]; + w3r = wtable_r[widx3 - 1]; + w3i = wtable_r[widx3]; + w4r = wtable_r[widx4 - 1]; + w4i = wtable_r[widx4]; + + int idx15 = in_off + i; + int idx16 = in_off + ic; + int idx17 = out_off + i; + + int iidx1 = idx15 + idx2; + int iidx2 = idx16 + idx3; + int iidx3 = idx15 + idx4; + int iidx4 = idx16 + idx5; + int iidx5 = idx15 + idx6; + + float i1i = in[iidx1 - 1]; + float i1r = in[iidx1]; + float i2i = in[iidx2 - 1]; + float i2r = in[iidx2]; + float i3i = in[iidx3 - 1]; + float i3r = in[iidx3]; + float i4i = in[iidx4 - 1]; + float i4r = in[iidx4]; + float i5i = in[iidx5 - 1]; + float i5r = in[iidx5]; + + ti5 = i3r + i2r; + ti2 = i3r - i2r; + ti4 = i5r + i4r; + ti3 = i5r - i4r; + tr5 = i3i - i2i; + tr2 = i3i + i2i; + tr4 = i5i - i4i; + tr3 = i5i + i4i; + + cr2 = i1i + tr11 * tr2 + tr12 * tr3; + ci2 = i1r + tr11 * ti2 + tr12 * ti3; + cr3 = i1i + tr12 * tr2 + tr11 * tr3; + ci3 = i1r + tr12 * ti2 + tr11 * ti3; + cr5 = ti11 * tr5 + ti12 * tr4; + ci5 = ti11 * ti5 + ti12 * ti4; + cr4 = ti12 * tr5 - ti11 * tr4; + ci4 = ti12 * ti5 - ti11 * ti4; + dr3 = cr3 - ci4; + dr4 = cr3 + ci4; + di3 = ci3 + cr4; + di4 = ci3 - cr4; + dr5 = cr2 + ci5; + dr2 = cr2 - ci5; + di5 = ci2 - cr5; + di2 = ci2 + cr5; + + int oidx1 = idx17 + idx1; + int oidx2 = idx17 + idx7; + int oidx3 = idx17 + idx8; + int oidx4 = idx17 + idx9; + int oidx5 = idx17 + idx10; + + out[oidx1 - 1] = i1i + tr2 + tr3; + out[oidx1] = i1r + ti2 + ti3; + out[oidx2 - 1] = w1r * dr2 - w1i * di2; + out[oidx2] = w1r * di2 + w1i * dr2; + out[oidx3 - 1] = w2r * dr3 - w2i * di3; + out[oidx3] = w2r * di3 + w2i * dr3; + out[oidx4 - 1] = w3r * dr4 - w3i * di4; + out[oidx4] = w3r * di4 + w3i * dr4; + out[oidx5 - 1] = w4r * dr5 - w4i * di5; + out[oidx5] = w4r * di5 + w4i * dr5; + } + } + } + + /*--------------------------------------------------------- + radfg: Real FFT's forward processing of general factor + --------------------------------------------------------*/ + void radfg(final int ido, final int ip, final int l1, final int idl1, final float[] in, final int in_off, final float[] out, final int out_off, final int offset) { + int idij, ipph, j2, ic, jc, lc, is, nbd; + float dc2, ai1, ai2, ar1, ar2, ds2, dcp, arg, dsp, ar1h, ar2h, w1r, w1i; + int iw1 = offset; + + arg = TWO_PI / (float) ip; + dcp = (float)Math.cos(arg); + dsp = (float)Math.sin(arg); + ipph = (ip + 1) / 2; + nbd = (ido - 1) / 2; + if (ido != 1) { + if (idl1 >= 0) System.arraycopy(in, in_off + 0, out, out_off + 0, idl1); + for (int j = 1; j < ip; j++) { + int idx1 = j * l1 * ido; + for (int k = 0; k < l1; k++) { + int idx2 = k * ido + idx1; + out[out_off + idx2] = in[in_off + idx2]; + } + } + if (nbd <= l1) { + is = -ido; + for (int j = 1; j < ip; j++) { + is += ido; + idij = is - 1; + int idx1 = j * l1 * ido; + for (int i = 2; i < 
ido; i += 2) { + idij += 2; + int idx2 = idij + iw1; + int idx4 = in_off + i; + int idx5 = out_off + i; + w1r = wtable_r[idx2 - 1]; + w1i = wtable_r[idx2]; + for (int k = 0; k < l1; k++) { + int idx3 = k * ido + idx1; + int oidx1 = idx5 + idx3; + int iidx1 = idx4 + idx3; + float i1i = in[iidx1 - 1]; + float i1r = in[iidx1]; + + out[oidx1 - 1] = w1r * i1i + w1i * i1r; + out[oidx1] = w1r * i1r - w1i * i1i; + } + } + } + } else { + is = -ido; + for (int j = 1; j < ip; j++) { + is += ido; + int idx1 = j * l1 * ido; + for (int k = 0; k < l1; k++) { + idij = is - 1; + int idx3 = k * ido + idx1; + for (int i = 2; i < ido; i += 2) { + idij += 2; + int idx2 = idij + iw1; + w1r = wtable_r[idx2 - 1]; + w1i = wtable_r[idx2]; + int oidx1 = out_off + i + idx3; + int iidx1 = in_off + i + idx3; + float i1i = in[iidx1 - 1]; + float i1r = in[iidx1]; + + out[oidx1 - 1] = w1r * i1i + w1i * i1r; + out[oidx1] = w1r * i1r - w1i * i1i; + } + } + } + } + if (nbd >= l1) { + for (int j = 1; j < ipph; j++) { + jc = ip - j; + int idx1 = j * l1 * ido; + int idx2 = jc * l1 * ido; + for (int k = 0; k < l1; k++) { + int idx3 = k * ido + idx1; + int idx4 = k * ido + idx2; + for (int i = 2; i < ido; i += 2) { + int idx5 = in_off + i; + int idx6 = out_off + i; + int iidx1 = idx5 + idx3; + int iidx2 = idx5 + idx4; + int oidx1 = idx6 + idx3; + int oidx2 = idx6 + idx4; + float o1i = out[oidx1 - 1]; + float o1r = out[oidx1]; + float o2i = out[oidx2 - 1]; + float o2r = out[oidx2]; + + in[iidx1 - 1] = o1i + o2i; + in[iidx1] = o1r + o2r; + + in[iidx2 - 1] = o1r - o2r; + in[iidx2] = o2i - o1i; + } + } + } + } else { + for (int j = 1; j < ipph; j++) { + jc = ip - j; + int idx1 = j * l1 * ido; + int idx2 = jc * l1 * ido; + for (int i = 2; i < ido; i += 2) { + int idx5 = in_off + i; + int idx6 = out_off + i; + for (int k = 0; k < l1; k++) { + int idx3 = k * ido + idx1; + int idx4 = k * ido + idx2; + int iidx1 = idx5 + idx3; + int iidx2 = idx5 + idx4; + int oidx1 = idx6 + idx3; + int oidx2 = idx6 + idx4; + float o1i = out[oidx1 - 1]; + float o1r = out[oidx1]; + float o2i = out[oidx2 - 1]; + float o2r = out[oidx2]; + + in[iidx1 - 1] = o1i + o2i; + in[iidx1] = o1r + o2r; + in[iidx2 - 1] = o1r - o2r; + in[iidx2] = o2i - o1i; + } + } + } + } + } else { + System.arraycopy(out, out_off, in, in_off, idl1); + } + for (int j = 1; j < ipph; j++) { + jc = ip - j; + int idx1 = j * l1 * ido; + int idx2 = jc * l1 * ido; + for (int k = 0; k < l1; k++) { + int idx3 = k * ido + idx1; + int idx4 = k * ido + idx2; + int oidx1 = out_off + idx3; + int oidx2 = out_off + idx4; + float o1r = out[oidx1]; + float o2r = out[oidx2]; + + in[in_off + idx3] = o1r + o2r; + in[in_off + idx4] = o2r - o1r; + } + } + + ar1 = 1; + ai1 = 0; + int idx0 = (ip - 1) * idl1; + for (int l = 1; l < ipph; l++) { + lc = ip - l; + ar1h = dcp * ar1 - dsp * ai1; + ai1 = dcp * ai1 + dsp * ar1; + ar1 = ar1h; + int idx1 = l * idl1; + int idx2 = lc * idl1; + for (int ik = 0; ik < idl1; ik++) { + int idx3 = out_off + ik; + int idx4 = in_off + ik; + out[idx3 + idx1] = in[idx4] + ar1 * in[idx4 + idl1]; + out[idx3 + idx2] = ai1 * in[idx4 + idx0]; + } + dc2 = ar1; + ds2 = ai1; + ar2 = ar1; + ai2 = ai1; + for (int j = 2; j < ipph; j++) { + jc = ip - j; + ar2h = dc2 * ar2 - ds2 * ai2; + ai2 = dc2 * ai2 + ds2 * ar2; + ar2 = ar2h; + int idx3 = j * idl1; + int idx4 = jc * idl1; + for (int ik = 0; ik < idl1; ik++) { + int idx5 = out_off + ik; + int idx6 = in_off + ik; + out[idx5 + idx1] += ar2 * in[idx6 + idx3]; + out[idx5 + idx2] += ai2 * in[idx6 + idx4]; + } + } + } + for (int j = 1; j < ipph; j++) 
{ + int idx1 = j * idl1; + for (int ik = 0; ik < idl1; ik++) { + out[out_off + ik] += in[in_off + ik + idx1]; + } + } + + if (ido >= l1) { + for (int k = 0; k < l1; k++) { + int idx1 = k * ido; + int idx2 = idx1 * ip; + for (int i = 0; i < ido; i++) { + in[in_off + i + idx2] = out[out_off + i + idx1]; + } + } + } else { + for (int i = 0; i < ido; i++) { + for (int k = 0; k < l1; k++) { + int idx1 = k * ido; + in[in_off + i + idx1 * ip] = out[out_off + i + idx1]; + } + } + } + int idx01 = ip * ido; + for (int j = 1; j < ipph; j++) { + jc = ip - j; + j2 = 2 * j; + int idx1 = j * l1 * ido; + int idx2 = jc * l1 * ido; + int idx3 = j2 * ido; + for (int k = 0; k < l1; k++) { + int idx4 = k * ido; + int idx5 = idx4 + idx1; + int idx6 = idx4 + idx2; + int idx7 = k * idx01; + in[in_off + ido - 1 + idx3 - ido + idx7] = out[out_off + idx5]; + in[in_off + idx3 + idx7] = out[out_off + idx6]; + } + } + if (ido == 1) + return; + if (nbd >= l1) { + for (int j = 1; j < ipph; j++) { + jc = ip - j; + j2 = 2 * j; + int idx1 = j * l1 * ido; + int idx2 = jc * l1 * ido; + int idx3 = j2 * ido; + for (int k = 0; k < l1; k++) { + int idx4 = k * idx01; + int idx5 = k * ido; + for (int i = 2; i < ido; i += 2) { + ic = ido - i; + int idx6 = in_off + i; + int idx7 = in_off + ic; + int idx8 = out_off + i; + int iidx1 = idx6 + idx3 + idx4; + int iidx2 = idx7 + idx3 - ido + idx4; + int oidx1 = idx8 + idx5 + idx1; + int oidx2 = idx8 + idx5 + idx2; + float o1i = out[oidx1 - 1]; + float o1r = out[oidx1]; + float o2i = out[oidx2 - 1]; + float o2r = out[oidx2]; + + in[iidx1 - 1] = o1i + o2i; + in[iidx2 - 1] = o1i - o2i; + in[iidx1] = o1r + o2r; + in[iidx2] = o2r - o1r; + } + } + } + } else { + for (int j = 1; j < ipph; j++) { + jc = ip - j; + j2 = 2 * j; + int idx1 = j * l1 * ido; + int idx2 = jc * l1 * ido; + int idx3 = j2 * ido; + for (int i = 2; i < ido; i += 2) { + ic = ido - i; + int idx6 = in_off + i; + int idx7 = in_off + ic; + int idx8 = out_off + i; + for (int k = 0; k < l1; k++) { + int idx4 = k * idx01; + int idx5 = k * ido; + int iidx1 = idx6 + idx3 + idx4; + int iidx2 = idx7 + idx3 - ido + idx4; + int oidx1 = idx8 + idx5 + idx1; + int oidx2 = idx8 + idx5 + idx2; + float o1i = out[oidx1 - 1]; + float o1r = out[oidx1]; + float o2i = out[oidx2 - 1]; + float o2r = out[oidx2]; + + in[iidx1 - 1] = o1i + o2i; + in[iidx2 - 1] = o1i - o2i; + in[iidx1] = o1r + o2r; + in[iidx2] = o2r - o1r; + } + } + } + } + } + + /*--------------------------------------------------------- + radbg: Real FFT's backward processing of general factor + --------------------------------------------------------*/ + void radbg(final int ido, final int ip, final int l1, final int idl1, final float[] in, final int in_off, final float[] out, final int out_off, final int offset) { + int idij, ipph, j2, ic, jc, lc, is; + float dc2, ai1, ai2, ar1, ar2, ds2, w1r, w1i; + int nbd; + float dcp, arg, dsp, ar1h, ar2h; + int iw1 = offset; + + arg = TWO_PI / (float) ip; + dcp = (float)Math.cos(arg); + dsp = (float)Math.sin(arg); + nbd = (ido - 1) / 2; + ipph = (ip + 1) / 2; + int idx0 = ip * ido; + if (ido >= l1) { + for (int k = 0; k < l1; k++) { + int idx1 = k * ido; + int idx2 = k * idx0; + for (int i = 0; i < ido; i++) { + out[out_off + i + idx1] = in[in_off + i + idx2]; + } + } + } else { + for (int i = 0; i < ido; i++) { + int idx1 = out_off + i; + int idx2 = in_off + i; + for (int k = 0; k < l1; k++) { + out[idx1 + k * ido] = in[idx2 + k * idx0]; + } + } + } + int iidx0 = in_off + ido - 1; + for (int j = 1; j < ipph; j++) { + jc = ip - j; + j2 = 2 * j; + 
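/*
 * radfg/radbg handle any remaining (typically prime) factor ip by direct summation
 * over its ip points. Instead of calling Math.cos/Math.sin in the inner loops, the
 * cosines and sines of 2*pi*l/ip are generated by the rotation recurrence
 *   ar1h = dcp*ar1 - dsp*ai1;  ai1 = dcp*ai1 + dsp*ar1;  ar1 = ar1h;
 * with dcp = cos(2*pi/ip) and dsp = sin(2*pi/ip). Only (ip+1)/2 rows are accumulated
 * because the data are real; the remaining outputs follow from symmetry via the
 * j and jc = ip - j index pairs.
 */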
int idx1 = j * l1 * ido; + int idx2 = jc * l1 * ido; + int idx3 = j2 * ido; + for (int k = 0; k < l1; k++) { + int idx4 = k * ido; + int idx5 = idx4 * ip; + int iidx1 = iidx0 + idx3 + idx5 - ido; + int iidx2 = in_off + idx3 + idx5; + float i1r = in[iidx1]; + float i2r = in[iidx2]; + + out[out_off + idx4 + idx1] = i1r + i1r; + out[out_off + idx4 + idx2] = i2r + i2r; + } + } + + if (ido != 1) { + if (nbd >= l1) { + for (int j = 1; j < ipph; j++) { + jc = ip - j; + int idx1 = j * l1 * ido; + int idx2 = jc * l1 * ido; + int idx3 = 2 * j * ido; + for (int k = 0; k < l1; k++) { + int idx4 = k * ido + idx1; + int idx5 = k * ido + idx2; + int idx6 = k * ip * ido + idx3; + for (int i = 2; i < ido; i += 2) { + ic = ido - i; + int idx7 = out_off + i; + int idx8 = in_off + ic; + int idx9 = in_off + i; + int oidx1 = idx7 + idx4; + int oidx2 = idx7 + idx5; + int iidx1 = idx9 + idx6; + int iidx2 = idx8 + idx6 - ido; + float a1i = in[iidx1 - 1]; + float a1r = in[iidx1]; + float a2i = in[iidx2 - 1]; + float a2r = in[iidx2]; + + out[oidx1 - 1] = a1i + a2i; + out[oidx2 - 1] = a1i - a2i; + out[oidx1] = a1r - a2r; + out[oidx2] = a1r + a2r; + } + } + } + } else { + for (int j = 1; j < ipph; j++) { + jc = ip - j; + int idx1 = j * l1 * ido; + int idx2 = jc * l1 * ido; + int idx3 = 2 * j * ido; + for (int i = 2; i < ido; i += 2) { + ic = ido - i; + int idx7 = out_off + i; + int idx8 = in_off + ic; + int idx9 = in_off + i; + for (int k = 0; k < l1; k++) { + int idx4 = k * ido + idx1; + int idx5 = k * ido + idx2; + int idx6 = k * ip * ido + idx3; + int oidx1 = idx7 + idx4; + int oidx2 = idx7 + idx5; + int iidx1 = idx9 + idx6; + int iidx2 = idx8 + idx6 - ido; + float a1i = in[iidx1 - 1]; + float a1r = in[iidx1]; + float a2i = in[iidx2 - 1]; + float a2r = in[iidx2]; + + out[oidx1 - 1] = a1i + a2i; + out[oidx2 - 1] = a1i - a2i; + out[oidx1] = a1r - a2r; + out[oidx2] = a1r + a2r; + } + } + } + } + } + + ar1 = 1; + ai1 = 0; + int idx01 = (ip - 1) * idl1; + for (int l = 1; l < ipph; l++) { + lc = ip - l; + ar1h = dcp * ar1 - dsp * ai1; + ai1 = dcp * ai1 + dsp * ar1; + ar1 = ar1h; + int idx1 = l * idl1; + int idx2 = lc * idl1; + for (int ik = 0; ik < idl1; ik++) { + int idx3 = in_off + ik; + int idx4 = out_off + ik; + in[idx3 + idx1] = out[idx4] + ar1 * out[idx4 + idl1]; + in[idx3 + idx2] = ai1 * out[idx4 + idx01]; + } + dc2 = ar1; + ds2 = ai1; + ar2 = ar1; + ai2 = ai1; + for (int j = 2; j < ipph; j++) { + jc = ip - j; + ar2h = dc2 * ar2 - ds2 * ai2; + ai2 = dc2 * ai2 + ds2 * ar2; + ar2 = ar2h; + int idx5 = j * idl1; + int idx6 = jc * idl1; + for (int ik = 0; ik < idl1; ik++) { + int idx7 = in_off + ik; + int idx8 = out_off + ik; + in[idx7 + idx1] += ar2 * out[idx8 + idx5]; + in[idx7 + idx2] += ai2 * out[idx8 + idx6]; + } + } + } + for (int j = 1; j < ipph; j++) { + int idx1 = j * idl1; + for (int ik = 0; ik < idl1; ik++) { + int idx2 = out_off + ik; + out[idx2] += out[idx2 + idx1]; + } + } + for (int j = 1; j < ipph; j++) { + jc = ip - j; + int idx1 = j * l1 * ido; + int idx2 = jc * l1 * ido; + for (int k = 0; k < l1; k++) { + int idx3 = k * ido; + int oidx1 = out_off + idx3; + int iidx1 = in_off + idx3 + idx1; + int iidx2 = in_off + idx3 + idx2; + float i1r = in[iidx1]; + float i2r = in[iidx2]; + + out[oidx1 + idx1] = i1r - i2r; + out[oidx1 + idx2] = i1r + i2r; + } + } + + if (ido == 1) + return; + if (nbd >= l1) { + for (int j = 1; j < ipph; j++) { + jc = ip - j; + int idx1 = j * l1 * ido; + int idx2 = jc * l1 * ido; + for (int k = 0; k < l1; k++) { + int idx3 = k * ido; + for (int i = 2; i < ido; i += 2) { + int idx4 = 
out_off + i; + int idx5 = in_off + i; + int oidx1 = idx4 + idx3 + idx1; + int oidx2 = idx4 + idx3 + idx2; + int iidx1 = idx5 + idx3 + idx1; + int iidx2 = idx5 + idx3 + idx2; + float i1i = in[iidx1 - 1]; + float i1r = in[iidx1]; + float i2i = in[iidx2 - 1]; + float i2r = in[iidx2]; + + out[oidx1 - 1] = i1i - i2r; + out[oidx2 - 1] = i1i + i2r; + out[oidx1] = i1r + i2i; + out[oidx2] = i1r - i2i; + } + } + } + } else { + for (int j = 1; j < ipph; j++) { + jc = ip - j; + int idx1 = j * l1 * ido; + int idx2 = jc * l1 * ido; + for (int i = 2; i < ido; i += 2) { + int idx4 = out_off + i; + int idx5 = in_off + i; + for (int k = 0; k < l1; k++) { + int idx3 = k * ido; + int oidx1 = idx4 + idx3 + idx1; + int oidx2 = idx4 + idx3 + idx2; + int iidx1 = idx5 + idx3 + idx1; + int iidx2 = idx5 + idx3 + idx2; + float i1i = in[iidx1 - 1]; + float i1r = in[iidx1]; + float i2i = in[iidx2 - 1]; + float i2r = in[iidx2]; + + out[oidx1 - 1] = i1i - i2r; + out[oidx2 - 1] = i1i + i2r; + out[oidx1] = i1r + i2i; + out[oidx2] = i1r - i2i; + } + } + } + } + System.arraycopy(out, out_off, in, in_off, idl1); + for (int j = 1; j < ip; j++) { + int idx1 = j * l1 * ido; + for (int k = 0; k < l1; k++) { + int idx2 = k * ido + idx1; + in[in_off + idx2] = out[out_off + idx2]; + } + } + if (nbd <= l1) { + is = -ido; + for (int j = 1; j < ip; j++) { + is += ido; + idij = is - 1; + int idx1 = j * l1 * ido; + for (int i = 2; i < ido; i += 2) { + idij += 2; + int idx2 = idij + iw1; + w1r = wtable_r[idx2 - 1]; + w1i = wtable_r[idx2]; + int idx4 = in_off + i; + int idx5 = out_off + i; + for (int k = 0; k < l1; k++) { + int idx3 = k * ido + idx1; + int iidx1 = idx4 + idx3; + int oidx1 = idx5 + idx3; + float o1i = out[oidx1 - 1]; + float o1r = out[oidx1]; + + in[iidx1 - 1] = w1r * o1i - w1i * o1r; + in[iidx1] = w1r * o1r + w1i * o1i; + } + } + } + } else { + is = -ido; + for (int j = 1; j < ip; j++) { + is += ido; + int idx1 = j * l1 * ido; + for (int k = 0; k < l1; k++) { + idij = is - 1; + int idx3 = k * ido + idx1; + for (int i = 2; i < ido; i += 2) { + idij += 2; + int idx2 = idij + iw1; + w1r = wtable_r[idx2 - 1]; + w1i = wtable_r[idx2]; + int idx4 = in_off + i; + int idx5 = out_off + i; + int iidx1 = idx4 + idx3; + int oidx1 = idx5 + idx3; + float o1i = out[oidx1 - 1]; + float o1r = out[oidx1]; + + in[iidx1 - 1] = w1r * o1i - w1i * o1r; + in[iidx1] = w1r * o1r + w1i * o1i; + + } + } + } + } + } + + /*--------------------------------------------------------- + cfftf1: further processing of Complex forward FFT + --------------------------------------------------------*/ + void cfftf(float[] a, int offa, int isign) { + int idot; + int l1, l2; + int na, nf, ip, iw, ido, idl1; + int[] nac = new int[1]; + final int twon = 2 * n; + + int iw1, iw2; + float[] ch = new float[twon]; + + iw1 = twon; + iw2 = 4 * n; + nac[0] = 0; + nf = (int) wtable[1 + iw2]; + na = 0; + l1 = 1; + iw = iw1; + for (int k1 = 2; k1 <= nf + 1; k1++) { + ip = (int) wtable[k1 + iw2]; + l2 = ip * l1; + ido = n / l2; + idot = ido + ido; + idl1 = idot * l1; + switch (ip) { + case 4: + if (na == 0) { + passf4(idot, l1, a, offa, ch, 0, iw, isign); + } else { + passf4(idot, l1, ch, 0, a, offa, iw, isign); + } + na = 1 - na; + break; + case 2: + if (na == 0) { + passf2(idot, l1, a, offa, ch, 0, iw, isign); + } else { + passf2(idot, l1, ch, 0, a, offa, iw, isign); + } + na = 1 - na; + break; + case 3: + if (na == 0) { + passf3(idot, l1, a, offa, ch, 0, iw, isign); + } else { + passf3(idot, l1, ch, 0, a, offa, iw, isign); + } + na = 1 - na; + break; + case 5: + if (na == 0) { 
+ passf5(idot, l1, a, offa, ch, 0, iw, isign); + } else { + passf5(idot, l1, ch, 0, a, offa, iw, isign); + } + na = 1 - na; + break; + default: + if (na == 0) { + passfg(nac, idot, ip, l1, idl1, a, offa, ch, 0, iw, isign); + } else { + passfg(nac, idot, ip, l1, idl1, ch, 0, a, offa, iw, isign); + } + if (nac[0] != 0) + na = 1 - na; + break; + } + l1 = l2; + iw += (ip - 1) * idot; + } + if (na == 0) + return; + System.arraycopy(ch, 0, a, offa, twon); + + } + + /*---------------------------------------------------------------------- + passf2: Complex FFT's forward/backward processing of factor 2; + isign is +1 for backward and -1 for forward transforms + ----------------------------------------------------------------------*/ + + void passf2(final int ido, final int l1, final float[] in, final int in_off, final float[] out, final int out_off, final int offset, final int isign) { + float t1i, t1r; + int iw1; + iw1 = offset; + int idx = ido * l1; + if (ido <= 2) { + for (int k = 0; k < l1; k++) { + int idx0 = k * ido; + int iidx1 = in_off + 2 * idx0; + int iidx2 = iidx1 + ido; + float a1r = in[iidx1]; + float a1i = in[iidx1 + 1]; + float a2r = in[iidx2]; + float a2i = in[iidx2 + 1]; + + int oidx1 = out_off + idx0; + int oidx2 = oidx1 + idx; + out[oidx1] = a1r + a2r; + out[oidx1 + 1] = a1i + a2i; + out[oidx2] = a1r - a2r; + out[oidx2 + 1] = a1i - a2i; + } + } else { + for (int k = 0; k < l1; k++) { + for (int i = 0; i < ido - 1; i += 2) { + int idx0 = k * ido; + int iidx1 = in_off + i + 2 * idx0; + int iidx2 = iidx1 + ido; + float i1r = in[iidx1]; + float i1i = in[iidx1 + 1]; + float i2r = in[iidx2]; + float i2i = in[iidx2 + 1]; + + int widx1 = i + iw1; + float w1r = wtable[widx1]; + float w1i = isign * wtable[widx1 + 1]; + + t1r = i1r - i2r; + t1i = i1i - i2i; + + int oidx1 = out_off + i + idx0; + int oidx2 = oidx1 + idx; + out[oidx1] = i1r + i2r; + out[oidx1 + 1] = i1i + i2i; + out[oidx2] = w1r * t1r - w1i * t1i; + out[oidx2 + 1] = w1r * t1i + w1i * t1r; + } + } + } + } + + /*---------------------------------------------------------------------- + passf3: Complex FFT's forward/backward processing of factor 3; + isign is +1 for backward and -1 for forward transforms + ----------------------------------------------------------------------*/ + void passf3(final int ido, final int l1, final float[] in, final int in_off, final float[] out, final int out_off, final int offset, final int isign) { + final float taur = -0.5f; + final float taui = 0.866025403784438707610604524234076962f; + float ci2, ci3, di2, di3, cr2, cr3, dr2, dr3, ti2, tr2; + int iw1, iw2; + + iw1 = offset; + iw2 = iw1 + ido; + + final int idxt = l1 * ido; + + if (ido == 2) { + for (int k = 1; k <= l1; k++) { + int iidx1 = in_off + (3 * k - 2) * ido; + int iidx2 = iidx1 + ido; + int iidx3 = iidx1 - ido; + float i1r = in[iidx1]; + float i1i = in[iidx1 + 1]; + float i2r = in[iidx2]; + float i2i = in[iidx2 + 1]; + float i3r = in[iidx3]; + float i3i = in[iidx3 + 1]; + + tr2 = i1r + i2r; + cr2 = i3r + taur * tr2; + ti2 = i1i + i2i; + ci2 = i3i + taur * ti2; + cr3 = isign * taui * (i1r - i2r); + ci3 = isign * taui * (i1i - i2i); + + int oidx1 = out_off + (k - 1) * ido; + int oidx2 = oidx1 + idxt; + int oidx3 = oidx2 + idxt; + out[oidx1] = in[iidx3] + tr2; + out[oidx1 + 1] = i3i + ti2; + out[oidx2] = cr2 - ci3; + out[oidx2 + 1] = ci2 + cr3; + out[oidx3] = cr2 + ci3; + out[oidx3 + 1] = ci2 - cr3; + } + } else { + for (int k = 1; k <= l1; k++) { + int idx1 = in_off + (3 * k - 2) * ido; + int idx2 = out_off + (k - 1) * ido; + for (int i = 0; 
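/*
 * cfftf is the complex mixed-radix driver (the same ping-pong scheme as rfftf, but on
 * interleaved re/im data of length 2n), and passf2..passf5/passfg are its butterflies.
 * One set of kernels serves both directions: the imaginary part of every twiddle is
 * multiplied by isign (w1i = isign * wtable[widx1 + 1]), so isign = -1 gives the
 * forward transform and isign = +1 the backward/inverse transform, as the routine
 * comments state.
 */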
i < ido - 1; i += 2) { + int iidx1 = i + idx1; + int iidx2 = iidx1 + ido; + int iidx3 = iidx1 - ido; + float a1r = in[iidx1]; + float a1i = in[iidx1 + 1]; + float a2r = in[iidx2]; + float a2i = in[iidx2 + 1]; + float a3r = in[iidx3]; + float a3i = in[iidx3 + 1]; + + tr2 = a1r + a2r; + cr2 = a3r + taur * tr2; + ti2 = a1i + a2i; + ci2 = a3i + taur * ti2; + cr3 = isign * taui * (a1r - a2r); + ci3 = isign * taui * (a1i - a2i); + dr2 = cr2 - ci3; + dr3 = cr2 + ci3; + di2 = ci2 + cr3; + di3 = ci2 - cr3; + + int widx1 = i + iw1; + int widx2 = i + iw2; + float w1r = wtable[widx1]; + float w1i = isign * wtable[widx1 + 1]; + float w2r = wtable[widx2]; + float w2i = isign * wtable[widx2 + 1]; + + int oidx1 = i + idx2; + int oidx2 = oidx1 + idxt; + int oidx3 = oidx2 + idxt; + out[oidx1] = a3r + tr2; + out[oidx1 + 1] = a3i + ti2; + out[oidx2] = w1r * dr2 - w1i * di2; + out[oidx2 + 1] = w1r * di2 + w1i * dr2; + out[oidx3] = w2r * dr3 - w2i * di3; + out[oidx3 + 1] = w2r * di3 + w2i * dr3; + } + } + } + } + + /*---------------------------------------------------------------------- + passf4: Complex FFT's forward/backward processing of factor 4; + isign is +1 for backward and -1 for forward transforms + ----------------------------------------------------------------------*/ + void passf4(final int ido, final int l1, final float[] in, final int in_off, final float[] out, final int out_off, final int offset, final int isign) { + float ci2, ci3, ci4, cr2, cr3, cr4, ti1, ti2, ti3, ti4, tr1, tr2, tr3, tr4; + int iw1, iw2, iw3; + iw1 = offset; + iw2 = iw1 + ido; + iw3 = iw2 + ido; + + int idx0 = l1 * ido; + if (ido == 2) { + for (int k = 0; k < l1; k++) { + int idxt1 = k * ido; + int iidx1 = in_off + 4 * idxt1 + 1; + int iidx2 = iidx1 + ido; + int iidx3 = iidx2 + ido; + int iidx4 = iidx3 + ido; + + float i1i = in[iidx1 - 1]; + float i1r = in[iidx1]; + float i2i = in[iidx2 - 1]; + float i2r = in[iidx2]; + float i3i = in[iidx3 - 1]; + float i3r = in[iidx3]; + float i4i = in[iidx4 - 1]; + float i4r = in[iidx4]; + + ti1 = i1r - i3r; + ti2 = i1r + i3r; + tr4 = i4r - i2r; + ti3 = i2r + i4r; + tr1 = i1i - i3i; + tr2 = i1i + i3i; + ti4 = i2i - i4i; + tr3 = i2i + i4i; + + int oidx1 = out_off + idxt1; + int oidx2 = oidx1 + idx0; + int oidx3 = oidx2 + idx0; + int oidx4 = oidx3 + idx0; + out[oidx1] = tr2 + tr3; + out[oidx1 + 1] = ti2 + ti3; + out[oidx2] = tr1 + isign * tr4; + out[oidx2 + 1] = ti1 + isign * ti4; + out[oidx3] = tr2 - tr3; + out[oidx3 + 1] = ti2 - ti3; + out[oidx4] = tr1 - isign * tr4; + out[oidx4 + 1] = ti1 - isign * ti4; + } + } else { + for (int k = 0; k < l1; k++) { + int idx1 = k * ido; + int idx2 = in_off + 1 + 4 * idx1; + for (int i = 0; i < ido - 1; i += 2) { + int iidx1 = i + idx2; + int iidx2 = iidx1 + ido; + int iidx3 = iidx2 + ido; + int iidx4 = iidx3 + ido; + float i1i = in[iidx1 - 1]; + float i1r = in[iidx1]; + float i2i = in[iidx2 - 1]; + float i2r = in[iidx2]; + float i3i = in[iidx3 - 1]; + float i3r = in[iidx3]; + float i4i = in[iidx4 - 1]; + float i4r = in[iidx4]; + + ti1 = i1r - i3r; + ti2 = i1r + i3r; + ti3 = i2r + i4r; + tr4 = i4r - i2r; + tr1 = i1i - i3i; + tr2 = i1i + i3i; + ti4 = i2i - i4i; + tr3 = i2i + i4i; + cr3 = tr2 - tr3; + ci3 = ti2 - ti3; + cr2 = tr1 + isign * tr4; + cr4 = tr1 - isign * tr4; + ci2 = ti1 + isign * ti4; + ci4 = ti1 - isign * ti4; + + int widx1 = i + iw1; + int widx2 = i + iw2; + int widx3 = i + iw3; + float w1r = wtable[widx1]; + float w1i = isign * wtable[widx1 + 1]; + float w2r = wtable[widx2]; + float w2i = isign * wtable[widx2 + 1]; + float w3r = 
wtable[widx3]; + float w3i = isign * wtable[widx3 + 1]; + + int oidx1 = out_off + i + idx1; + int oidx2 = oidx1 + idx0; + int oidx3 = oidx2 + idx0; + int oidx4 = oidx3 + idx0; + out[oidx1] = tr2 + tr3; + out[oidx1 + 1] = ti2 + ti3; + out[oidx2] = w1r * cr2 - w1i * ci2; + out[oidx2 + 1] = w1r * ci2 + w1i * cr2; + out[oidx3] = w2r * cr3 - w2i * ci3; + out[oidx3 + 1] = w2r * ci3 + w2i * cr3; + out[oidx4] = w3r * cr4 - w3i * ci4; + out[oidx4 + 1] = w3r * ci4 + w3i * cr4; + } + } + } + } + + /*---------------------------------------------------------------------- + passf5: Complex FFT's forward/backward processing of factor 5; + isign is +1 for backward and -1 for forward transforms + ----------------------------------------------------------------------*/ + void passf5(final int ido, final int l1, final float[] in, final int in_off, final float[] out, final int out_off, final int offset, final int isign) + /* isign==-1 for forward transform and+1 for backward transform */ + { + final float tr11 = 0.309016994374947451262869435595348477f; + final float ti11 = 0.951056516295153531181938433292089030f; + final float tr12 = -0.809016994374947340240566973079694435f; + final float ti12 = 0.587785252292473248125759255344746634f; + float ci2, ci3, ci4, ci5, di3, di4, di5, di2, cr2, cr3, cr5, cr4, ti2, ti3, ti4, ti5, dr3, dr4, dr5, dr2, tr2, tr3, tr4, tr5; + int iw1, iw2, iw3, iw4; + + iw1 = offset; + iw2 = iw1 + ido; + iw3 = iw2 + ido; + iw4 = iw3 + ido; + + int idx0 = l1 * ido; + + if (ido == 2) { + for (int k = 1; k <= l1; ++k) { + int iidx1 = in_off + (5 * k - 4) * ido + 1; + int iidx2 = iidx1 + ido; + int iidx3 = iidx1 - ido; + int iidx4 = iidx2 + ido; + int iidx5 = iidx4 + ido; + + float i1i = in[iidx1 - 1]; + float i1r = in[iidx1]; + float i2i = in[iidx2 - 1]; + float i2r = in[iidx2]; + float i3i = in[iidx3 - 1]; + float i3r = in[iidx3]; + float i4i = in[iidx4 - 1]; + float i4r = in[iidx4]; + float i5i = in[iidx5 - 1]; + float i5r = in[iidx5]; + + ti5 = i1r - i5r; + ti2 = i1r + i5r; + ti4 = i2r - i4r; + ti3 = i2r + i4r; + tr5 = i1i - i5i; + tr2 = i1i + i5i; + tr4 = i2i - i4i; + tr3 = i2i + i4i; + cr2 = i3i + tr11 * tr2 + tr12 * tr3; + ci2 = i3r + tr11 * ti2 + tr12 * ti3; + cr3 = i3i + tr12 * tr2 + tr11 * tr3; + ci3 = i3r + tr12 * ti2 + tr11 * ti3; + cr5 = isign * (ti11 * tr5 + ti12 * tr4); + ci5 = isign * (ti11 * ti5 + ti12 * ti4); + cr4 = isign * (ti12 * tr5 - ti11 * tr4); + ci4 = isign * (ti12 * ti5 - ti11 * ti4); + + int oidx1 = out_off + (k - 1) * ido; + int oidx2 = oidx1 + idx0; + int oidx3 = oidx2 + idx0; + int oidx4 = oidx3 + idx0; + int oidx5 = oidx4 + idx0; + out[oidx1] = i3i + tr2 + tr3; + out[oidx1 + 1] = i3r + ti2 + ti3; + out[oidx2] = cr2 - ci5; + out[oidx2 + 1] = ci2 + cr5; + out[oidx3] = cr3 - ci4; + out[oidx3 + 1] = ci3 + cr4; + out[oidx4] = cr3 + ci4; + out[oidx4 + 1] = ci3 - cr4; + out[oidx5] = cr2 + ci5; + out[oidx5 + 1] = ci2 - cr5; + } + } else { + for (int k = 1; k <= l1; k++) { + int idx1 = in_off + 1 + (k * 5 - 4) * ido; + int idx2 = out_off + (k - 1) * ido; + for (int i = 0; i < ido - 1; i += 2) { + int iidx1 = i + idx1; + int iidx2 = iidx1 + ido; + int iidx3 = iidx1 - ido; + int iidx4 = iidx2 + ido; + int iidx5 = iidx4 + ido; + float i1i = in[iidx1 - 1]; + float i1r = in[iidx1]; + float i2i = in[iidx2 - 1]; + float i2r = in[iidx2]; + float i3i = in[iidx3 - 1]; + float i3r = in[iidx3]; + float i4i = in[iidx4 - 1]; + float i4r = in[iidx4]; + float i5i = in[iidx5 - 1]; + float i5r = in[iidx5]; + + ti5 = i1r - i5r; + ti2 = i1r + i5r; + ti4 = i2r - i4r; + ti3 = i2r + i4r; + 
tr5 = i1i - i5i; + tr2 = i1i + i5i; + tr4 = i2i - i4i; + tr3 = i2i + i4i; + cr2 = i3i + tr11 * tr2 + tr12 * tr3; + ci2 = i3r + tr11 * ti2 + tr12 * ti3; + cr3 = i3i + tr12 * tr2 + tr11 * tr3; + ci3 = i3r + tr12 * ti2 + tr11 * ti3; + cr5 = isign * (ti11 * tr5 + ti12 * tr4); + ci5 = isign * (ti11 * ti5 + ti12 * ti4); + cr4 = isign * (ti12 * tr5 - ti11 * tr4); + ci4 = isign * (ti12 * ti5 - ti11 * ti4); + dr3 = cr3 - ci4; + dr4 = cr3 + ci4; + di3 = ci3 + cr4; + di4 = ci3 - cr4; + dr5 = cr2 + ci5; + dr2 = cr2 - ci5; + di5 = ci2 - cr5; + di2 = ci2 + cr5; + + int widx1 = i + iw1; + int widx2 = i + iw2; + int widx3 = i + iw3; + int widx4 = i + iw4; + float w1r = wtable[widx1]; + float w1i = isign * wtable[widx1 + 1]; + float w2r = wtable[widx2]; + float w2i = isign * wtable[widx2 + 1]; + float w3r = wtable[widx3]; + float w3i = isign * wtable[widx3 + 1]; + float w4r = wtable[widx4]; + float w4i = isign * wtable[widx4 + 1]; + + int oidx1 = i + idx2; + int oidx2 = oidx1 + idx0; + int oidx3 = oidx2 + idx0; + int oidx4 = oidx3 + idx0; + int oidx5 = oidx4 + idx0; + out[oidx1] = i3i + tr2 + tr3; + out[oidx1 + 1] = i3r + ti2 + ti3; + out[oidx2] = w1r * dr2 - w1i * di2; + out[oidx2 + 1] = w1r * di2 + w1i * dr2; + out[oidx3] = w2r * dr3 - w2i * di3; + out[oidx3 + 1] = w2r * di3 + w2i * dr3; + out[oidx4] = w3r * dr4 - w3i * di4; + out[oidx4 + 1] = w3r * di4 + w3i * dr4; + out[oidx5] = w4r * dr5 - w4i * di5; + out[oidx5 + 1] = w4r * di5 + w4i * dr5; + } + } + } + } + + /*---------------------------------------------------------------------- + passfg: Complex FFT's forward/backward processing of general factor; + isign is +1 for backward and -1 for forward transforms + ----------------------------------------------------------------------*/ + void passfg(final int[] nac, final int ido, final int ip, final int l1, final int idl1, final float[] in, final int in_off, final float[] out, final int out_off, final int offset, final int isign) { + int idij, idlj, idot, ipph, l, jc, lc, idj, idl, inc, idp; + float w1r, w1i, w2i, w2r; + int iw1; + + iw1 = offset; + idot = ido / 2; + ipph = (ip + 1) / 2; + idp = ip * ido; + if (ido >= l1) { + for (int j = 1; j < ipph; j++) { + jc = ip - j; + int idx1 = j * ido; + int idx2 = jc * ido; + for (int k = 0; k < l1; k++) { + int idx3 = k * ido; + int idx4 = idx3 + idx1 * l1; + int idx5 = idx3 + idx2 * l1; + int idx6 = idx3 * ip; + for (int i = 0; i < ido; i++) { + int oidx1 = out_off + i; + float i1r = in[in_off + i + idx1 + idx6]; + float i2r = in[in_off + i + idx2 + idx6]; + out[oidx1 + idx4] = i1r + i2r; + out[oidx1 + idx5] = i1r - i2r; + } + } + } + for (int k = 0; k < l1; k++) { + int idxt1 = k * ido; + int idxt2 = idxt1 * ip; + for (int i = 0; i < ido; i++) { + out[out_off + i + idxt1] = in[in_off + i + idxt2]; + } + } + } else { + for (int j = 1; j < ipph; j++) { + jc = ip - j; + int idxt1 = j * l1 * ido; + int idxt2 = jc * l1 * ido; + int idxt3 = j * ido; + int idxt4 = jc * ido; + for (int i = 0; i < ido; i++) { + for (int k = 0; k < l1; k++) { + int idx1 = k * ido; + int idx2 = idx1 * ip; + int idx3 = out_off + i; + int idx4 = in_off + i; + float i1r = in[idx4 + idxt3 + idx2]; + float i2r = in[idx4 + idxt4 + idx2]; + out[idx3 + idx1 + idxt1] = i1r + i2r; + out[idx3 + idx1 + idxt2] = i1r - i2r; + } + } + } + for (int i = 0; i < ido; i++) { + for (int k = 0; k < l1; k++) { + int idx1 = k * ido; + out[out_off + i + idx1] = in[in_off + i + idx1 * ip]; + } + } + } + + idl = 2 - ido; + inc = 0; + int idxt0 = (ip - 1) * idl1; + for (l = 1; l < ipph; l++) { + lc = ip - l; + 
idl += ido; + int idxt1 = l * idl1; + int idxt2 = lc * idl1; + int idxt3 = idl + iw1; + w1r = wtable[idxt3 - 2]; + w1i = isign * wtable[idxt3 - 1]; + for (int ik = 0; ik < idl1; ik++) { + int idx1 = in_off + ik; + int idx2 = out_off + ik; + in[idx1 + idxt1] = out[idx2] + w1r * out[idx2 + idl1]; + in[idx1 + idxt2] = w1i * out[idx2 + idxt0]; + } + idlj = idl; + inc += ido; + for (int j = 2; j < ipph; j++) { + jc = ip - j; + idlj += inc; + if (idlj > idp) + idlj -= idp; + int idxt4 = idlj + iw1; + w2r = wtable[idxt4 - 2]; + w2i = isign * wtable[idxt4 - 1]; + int idxt5 = j * idl1; + int idxt6 = jc * idl1; + for (int ik = 0; ik < idl1; ik++) { + int idx1 = in_off + ik; + int idx2 = out_off + ik; + in[idx1 + idxt1] += w2r * out[idx2 + idxt5]; + in[idx1 + idxt2] += w2i * out[idx2 + idxt6]; + } + } + } + for (int j = 1; j < ipph; j++) { + int idxt1 = j * idl1; + for (int ik = 0; ik < idl1; ik++) { + int idx1 = out_off + ik; + out[idx1] += out[idx1 + idxt1]; + } + } + for (int j = 1; j < ipph; j++) { + jc = ip - j; + int idx1 = j * idl1; + int idx2 = jc * idl1; + for (int ik = 1; ik < idl1; ik += 2) { + int idx3 = out_off + ik; + int idx4 = in_off + ik; + int iidx1 = idx4 + idx1; + int iidx2 = idx4 + idx2; + float i1i = in[iidx1 - 1]; + float i1r = in[iidx1]; + float i2i = in[iidx2 - 1]; + float i2r = in[iidx2]; + + int oidx1 = idx3 + idx1; + int oidx2 = idx3 + idx2; + out[oidx1 - 1] = i1i - i2r; + out[oidx2 - 1] = i1i + i2r; + out[oidx1] = i1r + i2i; + out[oidx2] = i1r - i2i; + } + } + nac[0] = 1; + if (ido == 2) + return; + nac[0] = 0; + System.arraycopy(out, out_off, in, in_off, idl1); + int idx0 = l1 * ido; + for (int j = 1; j < ip; j++) { + int idx1 = j * idx0; + for (int k = 0; k < l1; k++) { + int idx2 = k * ido; + int oidx1 = out_off + idx2 + idx1; + int iidx1 = in_off + idx2 + idx1; + in[iidx1] = out[oidx1]; + in[iidx1 + 1] = out[oidx1 + 1]; + } + } + if (idot <= l1) { + idij = 0; + for (int j = 1; j < ip; j++) { + idij += 2; + int idx1 = j * l1 * ido; + for (int i = 3; i < ido; i += 2) { + idij += 2; + int idx2 = idij + iw1 - 1; + w1r = wtable[idx2 - 1]; + w1i = isign * wtable[idx2]; + int idx3 = in_off + i; + int idx4 = out_off + i; + for (int k = 0; k < l1; k++) { + int idx5 = k * ido + idx1; + int iidx1 = idx3 + idx5; + int oidx1 = idx4 + idx5; + float o1i = out[oidx1 - 1]; + float o1r = out[oidx1]; + in[iidx1 - 1] = w1r * o1i - w1i * o1r; + in[iidx1] = w1r * o1r + w1i * o1i; + } + } + } + } else { + idj = 2 - ido; + for (int j = 1; j < ip; j++) { + idj += ido; + int idx1 = j * l1 * ido; + for (int k = 0; k < l1; k++) { + idij = idj; + int idx3 = k * ido + idx1; + for (int i = 3; i < ido; i += 2) { + idij += 2; + int idx2 = idij - 1 + iw1; + w1r = wtable[idx2 - 1]; + w1i = isign * wtable[idx2]; + int iidx1 = in_off + i + idx3; + int oidx1 = out_off + i + idx3; + float o1i = out[oidx1 - 1]; + float o1r = out[oidx1]; + in[iidx1 - 1] = w1r * o1i - w1i * o1r; + in[iidx1] = w1r * o1r + w1i * o1i; + } + } + } + } + } + + private void cftfsub(int n, float[] a, int offa, int[] ip, int nw, float[] w) { + if (n > 8) { + if (n > 32) { + cftf1st(n, a, offa, w, nw - (n >> 2)); + if ((ConcurrencyUtils.getNumberOfThreads() > 1) && (n > ConcurrencyUtils.getThreadsBeginN_1D_FFT_2Threads())) { + cftrec4_th(n, a, offa, nw, w); + } else if (n > 512) { + cftrec4(n, a, offa, nw, w); + } else if (n > 128) { + cftleaf(n, 1, a, offa, nw, w); + } else { + cftfx41(n, a, offa, nw, w); + } + bitrv2(n, ip, a, offa); + } else if (n == 32) { + cftf161(a, offa, w, nw - 8); + bitrv216(a, offa); + } else { + cftf081(a, 
offa, w, 0); + bitrv208(a, offa); + } + } else if (n == 8) { + cftf040(a, offa); + } else if (n == 4) { + cftxb020(a, offa); + } + } + + private void cftbsub(int n, float[] a, int offa, int[] ip, int nw, float[] w) { + if (n > 8) { + if (n > 32) { + cftb1st(n, a, offa, w, nw - (n >> 2)); + if ((ConcurrencyUtils.getNumberOfThreads() > 1) && (n > ConcurrencyUtils.getThreadsBeginN_1D_FFT_2Threads())) { + cftrec4_th(n, a, offa, nw, w); + } else if (n > 512) { + cftrec4(n, a, offa, nw, w); + } else if (n > 128) { + cftleaf(n, 1, a, offa, nw, w); + } else { + cftfx41(n, a, offa, nw, w); + } + bitrv2conj(n, ip, a, offa); + } else if (n == 32) { + cftf161(a, offa, w, nw - 8); + bitrv216neg(a, offa); + } else { + cftf081(a, offa, w, 0); + bitrv208neg(a, offa); + } + } else if (n == 8) { + cftb040(a, offa); + } else if (n == 4) { + cftxb020(a, offa); + } + } + + private void bitrv2(int n, int[] ip, float[] a, int offa) { + int j1, k1, l, m, nh, nm; + float xr, xi, yr, yi; + int idx0, idx1, idx2; + + m = 1; + for (l = n >> 2; l > 8; l >>= 2) { + m <<= 1; + } + nh = n >> 1; + nm = 4 * m; + if (l == 8) { + for (int k = 0; k < m; k++) { + idx0 = 4 * k; + for (int j = 0; j < k; j++) { + j1 = 4 * j + 2 * ip[m + k]; + k1 = idx0 + 2 * ip[m + j]; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = a[idx1 + 1]; + yr = a[idx2]; + yi = a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 += nm; + k1 += 2 * nm; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = a[idx1 + 1]; + yr = a[idx2]; + yi = a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 += nm; + k1 -= nm; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = a[idx1 + 1]; + yr = a[idx2]; + yi = a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 += nm; + k1 += 2 * nm; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = a[idx1 + 1]; + yr = a[idx2]; + yi = a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 += nh; + k1 += 2; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = a[idx1 + 1]; + yr = a[idx2]; + yi = a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 -= nm; + k1 -= 2 * nm; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = a[idx1 + 1]; + yr = a[idx2]; + yi = a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 -= nm; + k1 += nm; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = a[idx1 + 1]; + yr = a[idx2]; + yi = a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 -= nm; + k1 -= 2 * nm; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = a[idx1 + 1]; + yr = a[idx2]; + yi = a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 += 2; + k1 += nh; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = a[idx1 + 1]; + yr = a[idx2]; + yi = a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 += nm; + k1 += 2 * nm; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = a[idx1 + 1]; + yr = a[idx2]; + yi = a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 += nm; + k1 -= nm; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = a[idx1 + 1]; + yr = a[idx2]; + yi = a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 += nm; + k1 += 2 * 
nm; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = a[idx1 + 1]; + yr = a[idx2]; + yi = a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 -= nh; + k1 -= 2; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = a[idx1 + 1]; + yr = a[idx2]; + yi = a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 -= nm; + k1 -= 2 * nm; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = a[idx1 + 1]; + yr = a[idx2]; + yi = a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 -= nm; + k1 += nm; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = a[idx1 + 1]; + yr = a[idx2]; + yi = a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 -= nm; + k1 -= 2 * nm; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = a[idx1 + 1]; + yr = a[idx2]; + yi = a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + } + k1 = idx0 + 2 * ip[m + k]; + j1 = k1 + 2; + k1 += nh; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = a[idx1 + 1]; + yr = a[idx2]; + yi = a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 += nm; + k1 += 2 * nm; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = a[idx1 + 1]; + yr = a[idx2]; + yi = a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 += nm; + k1 -= nm; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = a[idx1 + 1]; + yr = a[idx2]; + yi = a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 -= 2; + k1 -= nh; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = a[idx1 + 1]; + yr = a[idx2]; + yi = a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 += nh + 2; + k1 += nh + 2; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = a[idx1 + 1]; + yr = a[idx2]; + yi = a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 -= nh - nm; + k1 += 2 * nm - 2; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = a[idx1 + 1]; + yr = a[idx2]; + yi = a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + } + } else { + for (int k = 0; k < m; k++) { + idx0 = 4 * k; + for (int j = 0; j < k; j++) { + j1 = 4 * j + ip[m + k]; + k1 = idx0 + ip[m + j]; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = a[idx1 + 1]; + yr = a[idx2]; + yi = a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 += nm; + k1 += nm; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = a[idx1 + 1]; + yr = a[idx2]; + yi = a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 += nh; + k1 += 2; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = a[idx1 + 1]; + yr = a[idx2]; + yi = a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 -= nm; + k1 -= nm; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = a[idx1 + 1]; + yr = a[idx2]; + yi = a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 += 2; + k1 += nh; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = a[idx1 + 1]; + yr = a[idx2]; + yi = a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 += nm; + k1 += nm; + idx1 = offa + j1; + idx2 = offa + 
k1; + xr = a[idx1]; + xi = a[idx1 + 1]; + yr = a[idx2]; + yi = a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 -= nh; + k1 -= 2; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = a[idx1 + 1]; + yr = a[idx2]; + yi = a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 -= nm; + k1 -= nm; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = a[idx1 + 1]; + yr = a[idx2]; + yi = a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + } + k1 = idx0 + ip[m + k]; + j1 = k1 + 2; + k1 += nh; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = a[idx1 + 1]; + yr = a[idx2]; + yi = a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 += nm; + k1 += nm; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = a[idx1 + 1]; + yr = a[idx2]; + yi = a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + } + } + } + + private void bitrv2conj(int n, int[] ip, float[] a, int offa) { + int j1, k1, l, m, nh, nm; + float xr, xi, yr, yi; + int idx0, idx1, idx2; + + m = 1; + for (l = n >> 2; l > 8; l >>= 2) { + m <<= 1; + } + nh = n >> 1; + nm = 4 * m; + if (l == 8) { + for (int k = 0; k < m; k++) { + idx0 = 4 * k; + for (int j = 0; j < k; j++) { + j1 = 4 * j + 2 * ip[m + k]; + k1 = idx0 + 2 * ip[m + j]; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = -a[idx1 + 1]; + yr = a[idx2]; + yi = -a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 += nm; + k1 += 2 * nm; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = -a[idx1 + 1]; + yr = a[idx2]; + yi = -a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 += nm; + k1 -= nm; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = -a[idx1 + 1]; + yr = a[idx2]; + yi = -a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 += nm; + k1 += 2 * nm; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = -a[idx1 + 1]; + yr = a[idx2]; + yi = -a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 += nh; + k1 += 2; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = -a[idx1 + 1]; + yr = a[idx2]; + yi = -a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 -= nm; + k1 -= 2 * nm; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = -a[idx1 + 1]; + yr = a[idx2]; + yi = -a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 -= nm; + k1 += nm; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = -a[idx1 + 1]; + yr = a[idx2]; + yi = -a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 -= nm; + k1 -= 2 * nm; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = -a[idx1 + 1]; + yr = a[idx2]; + yi = -a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 += 2; + k1 += nh; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = -a[idx1 + 1]; + yr = a[idx2]; + yi = -a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 += nm; + k1 += 2 * nm; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = -a[idx1 + 1]; + yr = a[idx2]; + yi = -a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 += nm; + k1 -= nm; + idx1 = offa + j1; + 
idx2 = offa + k1; + xr = a[idx1]; + xi = -a[idx1 + 1]; + yr = a[idx2]; + yi = -a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 += nm; + k1 += 2 * nm; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = -a[idx1 + 1]; + yr = a[idx2]; + yi = -a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 -= nh; + k1 -= 2; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = -a[idx1 + 1]; + yr = a[idx2]; + yi = -a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 -= nm; + k1 -= 2 * nm; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = -a[idx1 + 1]; + yr = a[idx2]; + yi = -a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 -= nm; + k1 += nm; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = -a[idx1 + 1]; + yr = a[idx2]; + yi = -a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 -= nm; + k1 -= 2 * nm; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = -a[idx1 + 1]; + yr = a[idx2]; + yi = -a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + } + k1 = idx0 + 2 * ip[m + k]; + j1 = k1 + 2; + k1 += nh; + idx1 = offa + j1; + idx2 = offa + k1; + a[idx1 - 1] = -a[idx1 - 1]; + xr = a[idx1]; + xi = -a[idx1 + 1]; + yr = a[idx2]; + yi = -a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + a[idx2 + 3] = -a[idx2 + 3]; + j1 += nm; + k1 += 2 * nm; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = -a[idx1 + 1]; + yr = a[idx2]; + yi = -a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 += nm; + k1 -= nm; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = -a[idx1 + 1]; + yr = a[idx2]; + yi = -a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 -= 2; + k1 -= nh; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = -a[idx1 + 1]; + yr = a[idx2]; + yi = -a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 += nh + 2; + k1 += nh + 2; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = -a[idx1 + 1]; + yr = a[idx2]; + yi = -a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 -= nh - nm; + k1 += 2 * nm - 2; + idx1 = offa + j1; + idx2 = offa + k1; + a[idx1 - 1] = -a[idx1 - 1]; + xr = a[idx1]; + xi = -a[idx1 + 1]; + yr = a[idx2]; + yi = -a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + a[idx2 + 3] = -a[idx2 + 3]; + } + } else { + for (int k = 0; k < m; k++) { + idx0 = 4 * k; + for (int j = 0; j < k; j++) { + j1 = 4 * j + ip[m + k]; + k1 = idx0 + ip[m + j]; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = -a[idx1 + 1]; + yr = a[idx2]; + yi = -a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 += nm; + k1 += nm; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = -a[idx1 + 1]; + yr = a[idx2]; + yi = -a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 += nh; + k1 += 2; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = -a[idx1 + 1]; + yr = a[idx2]; + yi = -a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 -= nm; + k1 -= nm; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = -a[idx1 + 1]; + yr = a[idx2]; + yi = -a[idx2 + 1]; 
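+ // swap the two complex samples at bit-reversed positions, writing back with negated imaginary parts (conjugation)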
+ a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 += 2; + k1 += nh; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = -a[idx1 + 1]; + yr = a[idx2]; + yi = -a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 += nm; + k1 += nm; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = -a[idx1 + 1]; + yr = a[idx2]; + yi = -a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 -= nh; + k1 -= 2; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = -a[idx1 + 1]; + yr = a[idx2]; + yi = -a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + j1 -= nm; + k1 -= nm; + idx1 = offa + j1; + idx2 = offa + k1; + xr = a[idx1]; + xi = -a[idx1 + 1]; + yr = a[idx2]; + yi = -a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + } + k1 = idx0 + ip[m + k]; + j1 = k1 + 2; + k1 += nh; + idx1 = offa + j1; + idx2 = offa + k1; + a[idx1 - 1] = -a[idx1 - 1]; + xr = a[idx1]; + xi = -a[idx1 + 1]; + yr = a[idx2]; + yi = -a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + a[idx2 + 3] = -a[idx2 + 3]; + j1 += nm; + k1 += nm; + idx1 = offa + j1; + idx2 = offa + k1; + a[idx1 - 1] = -a[idx1 - 1]; + xr = a[idx1]; + xi = -a[idx1 + 1]; + yr = a[idx2]; + yi = -a[idx2 + 1]; + a[idx1] = yr; + a[idx1 + 1] = yi; + a[idx2] = xr; + a[idx2 + 1] = xi; + a[idx2 + 3] = -a[idx2 + 3]; + } + } + } + + private void bitrv216(float[] a, int offa) { + float x1r, x1i, x2r, x2i, x3r, x3i, x4r, x4i, x5r, x5i, x7r, x7i, x8r, x8i, x10r, x10i, x11r, x11i, x12r, x12i, x13r, x13i, x14r, x14i; + + x1r = a[offa + 2]; + x1i = a[offa + 3]; + x2r = a[offa + 4]; + x2i = a[offa + 5]; + x3r = a[offa + 6]; + x3i = a[offa + 7]; + x4r = a[offa + 8]; + x4i = a[offa + 9]; + x5r = a[offa + 10]; + x5i = a[offa + 11]; + x7r = a[offa + 14]; + x7i = a[offa + 15]; + x8r = a[offa + 16]; + x8i = a[offa + 17]; + x10r = a[offa + 20]; + x10i = a[offa + 21]; + x11r = a[offa + 22]; + x11i = a[offa + 23]; + x12r = a[offa + 24]; + x12i = a[offa + 25]; + x13r = a[offa + 26]; + x13i = a[offa + 27]; + x14r = a[offa + 28]; + x14i = a[offa + 29]; + a[offa + 2] = x8r; + a[offa + 3] = x8i; + a[offa + 4] = x4r; + a[offa + 5] = x4i; + a[offa + 6] = x12r; + a[offa + 7] = x12i; + a[offa + 8] = x2r; + a[offa + 9] = x2i; + a[offa + 10] = x10r; + a[offa + 11] = x10i; + a[offa + 14] = x14r; + a[offa + 15] = x14i; + a[offa + 16] = x1r; + a[offa + 17] = x1i; + a[offa + 20] = x5r; + a[offa + 21] = x5i; + a[offa + 22] = x13r; + a[offa + 23] = x13i; + a[offa + 24] = x3r; + a[offa + 25] = x3i; + a[offa + 26] = x11r; + a[offa + 27] = x11i; + a[offa + 28] = x7r; + a[offa + 29] = x7i; + } + + private void bitrv216neg(float[] a, int offa) { + float x1r, x1i, x2r, x2i, x3r, x3i, x4r, x4i, x5r, x5i, x6r, x6i, x7r, x7i, x8r, x8i, x9r, x9i, x10r, x10i, x11r, x11i, x12r, x12i, x13r, x13i, x14r, x14i, x15r, x15i; + + x1r = a[offa + 2]; + x1i = a[offa + 3]; + x2r = a[offa + 4]; + x2i = a[offa + 5]; + x3r = a[offa + 6]; + x3i = a[offa + 7]; + x4r = a[offa + 8]; + x4i = a[offa + 9]; + x5r = a[offa + 10]; + x5i = a[offa + 11]; + x6r = a[offa + 12]; + x6i = a[offa + 13]; + x7r = a[offa + 14]; + x7i = a[offa + 15]; + x8r = a[offa + 16]; + x8i = a[offa + 17]; + x9r = a[offa + 18]; + x9i = a[offa + 19]; + x10r = a[offa + 20]; + x10i = a[offa + 21]; + x11r = a[offa + 22]; + x11i = a[offa + 23]; + x12r = a[offa + 24]; + x12i = a[offa + 25]; + x13r = a[offa + 26]; + x13i = a[offa + 27]; + x14r = 
a[offa + 28]; + x14i = a[offa + 29]; + x15r = a[offa + 30]; + x15i = a[offa + 31]; + a[offa + 2] = x15r; + a[offa + 3] = x15i; + a[offa + 4] = x7r; + a[offa + 5] = x7i; + a[offa + 6] = x11r; + a[offa + 7] = x11i; + a[offa + 8] = x3r; + a[offa + 9] = x3i; + a[offa + 10] = x13r; + a[offa + 11] = x13i; + a[offa + 12] = x5r; + a[offa + 13] = x5i; + a[offa + 14] = x9r; + a[offa + 15] = x9i; + a[offa + 16] = x1r; + a[offa + 17] = x1i; + a[offa + 18] = x14r; + a[offa + 19] = x14i; + a[offa + 20] = x6r; + a[offa + 21] = x6i; + a[offa + 22] = x10r; + a[offa + 23] = x10i; + a[offa + 24] = x2r; + a[offa + 25] = x2i; + a[offa + 26] = x12r; + a[offa + 27] = x12i; + a[offa + 28] = x4r; + a[offa + 29] = x4i; + a[offa + 30] = x8r; + a[offa + 31] = x8i; + } + + private void bitrv208(float[] a, int offa) { + float x1r, x1i, x3r, x3i, x4r, x4i, x6r, x6i; + + x1r = a[offa + 2]; + x1i = a[offa + 3]; + x3r = a[offa + 6]; + x3i = a[offa + 7]; + x4r = a[offa + 8]; + x4i = a[offa + 9]; + x6r = a[offa + 12]; + x6i = a[offa + 13]; + a[offa + 2] = x4r; + a[offa + 3] = x4i; + a[offa + 6] = x6r; + a[offa + 7] = x6i; + a[offa + 8] = x1r; + a[offa + 9] = x1i; + a[offa + 12] = x3r; + a[offa + 13] = x3i; + } + + private void bitrv208neg(float[] a, int offa) { + float x1r, x1i, x2r, x2i, x3r, x3i, x4r, x4i, x5r, x5i, x6r, x6i, x7r, x7i; + + x1r = a[offa + 2]; + x1i = a[offa + 3]; + x2r = a[offa + 4]; + x2i = a[offa + 5]; + x3r = a[offa + 6]; + x3i = a[offa + 7]; + x4r = a[offa + 8]; + x4i = a[offa + 9]; + x5r = a[offa + 10]; + x5i = a[offa + 11]; + x6r = a[offa + 12]; + x6i = a[offa + 13]; + x7r = a[offa + 14]; + x7i = a[offa + 15]; + a[offa + 2] = x7r; + a[offa + 3] = x7i; + a[offa + 4] = x3r; + a[offa + 5] = x3i; + a[offa + 6] = x5r; + a[offa + 7] = x5i; + a[offa + 8] = x1r; + a[offa + 9] = x1i; + a[offa + 10] = x6r; + a[offa + 11] = x6i; + a[offa + 12] = x2r; + a[offa + 13] = x2i; + a[offa + 14] = x4r; + a[offa + 15] = x4i; + } + + private void cftf1st(int n, float[] a, int offa, float[] w, int startw) { + int j0, j1, j2, j3, k, m, mh; + float wn4r, csc1, csc3, wk1r, wk1i, wk3r, wk3i, wd1r, wd1i, wd3r, wd3i; + float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i, y0r, y0i, y1r, y1i, y2r, y2i, y3r, y3i; + int idx0, idx1, idx2, idx3, idx4, idx5; + mh = n >> 3; + m = 2 * mh; + j1 = m; + j2 = j1 + m; + j3 = j2 + m; + idx1 = offa + j1; + idx2 = offa + j2; + idx3 = offa + j3; + x0r = a[offa] + a[idx2]; + x0i = a[offa + 1] + a[idx2 + 1]; + x1r = a[offa] - a[idx2]; + x1i = a[offa + 1] - a[idx2 + 1]; + x2r = a[idx1] + a[idx3]; + x2i = a[idx1 + 1] + a[idx3 + 1]; + x3r = a[idx1] - a[idx3]; + x3i = a[idx1 + 1] - a[idx3 + 1]; + a[offa] = x0r + x2r; + a[offa + 1] = x0i + x2i; + a[idx1] = x0r - x2r; + a[idx1 + 1] = x0i - x2i; + a[idx2] = x1r - x3i; + a[idx2 + 1] = x1i + x3r; + a[idx3] = x1r + x3i; + a[idx3 + 1] = x1i - x3r; + wn4r = w[startw + 1]; + csc1 = w[startw + 2]; + csc3 = w[startw + 3]; + wd1r = 1; + wd1i = 0; + wd3r = 1; + wd3i = 0; + k = 0; + for (int j = 2; j < mh - 2; j += 4) { + k += 4; + idx4 = startw + k; + wk1r = csc1 * (wd1r + w[idx4]); + wk1i = csc1 * (wd1i + w[idx4 + 1]); + wk3r = csc3 * (wd3r + w[idx4 + 2]); + wk3i = csc3 * (wd3i + w[idx4 + 3]); + wd1r = w[idx4]; + wd1i = w[idx4 + 1]; + wd3r = w[idx4 + 2]; + wd3i = w[idx4 + 3]; + j1 = j + m; + j2 = j1 + m; + j3 = j2 + m; + idx1 = offa + j1; + idx2 = offa + j2; + idx3 = offa + j3; + idx5 = offa + j; + x0r = a[idx5] + a[idx2]; + x0i = a[idx5 + 1] + a[idx2 + 1]; + x1r = a[idx5] - a[idx2]; + x1i = a[idx5 + 1] - a[idx2 + 1]; + y0r = a[idx5 + 2] + a[idx2 + 2]; + y0i = a[idx5 + 3] 
+ a[idx2 + 3]; + y1r = a[idx5 + 2] - a[idx2 + 2]; + y1i = a[idx5 + 3] - a[idx2 + 3]; + x2r = a[idx1] + a[idx3]; + x2i = a[idx1 + 1] + a[idx3 + 1]; + x3r = a[idx1] - a[idx3]; + x3i = a[idx1 + 1] - a[idx3 + 1]; + y2r = a[idx1 + 2] + a[idx3 + 2]; + y2i = a[idx1 + 3] + a[idx3 + 3]; + y3r = a[idx1 + 2] - a[idx3 + 2]; + y3i = a[idx1 + 3] - a[idx3 + 3]; + a[idx5] = x0r + x2r; + a[idx5 + 1] = x0i + x2i; + a[idx5 + 2] = y0r + y2r; + a[idx5 + 3] = y0i + y2i; + a[idx1] = x0r - x2r; + a[idx1 + 1] = x0i - x2i; + a[idx1 + 2] = y0r - y2r; + a[idx1 + 3] = y0i - y2i; + x0r = x1r - x3i; + x0i = x1i + x3r; + a[idx2] = wk1r * x0r - wk1i * x0i; + a[idx2 + 1] = wk1r * x0i + wk1i * x0r; + x0r = y1r - y3i; + x0i = y1i + y3r; + a[idx2 + 2] = wd1r * x0r - wd1i * x0i; + a[idx2 + 3] = wd1r * x0i + wd1i * x0r; + x0r = x1r + x3i; + x0i = x1i - x3r; + a[idx3] = wk3r * x0r + wk3i * x0i; + a[idx3 + 1] = wk3r * x0i - wk3i * x0r; + x0r = y1r + y3i; + x0i = y1i - y3r; + a[idx3 + 2] = wd3r * x0r + wd3i * x0i; + a[idx3 + 3] = wd3r * x0i - wd3i * x0r; + j0 = m - j; + j1 = j0 + m; + j2 = j1 + m; + j3 = j2 + m; + idx0 = offa + j0; + idx1 = offa + j1; + idx2 = offa + j2; + idx3 = offa + j3; + x0r = a[idx0] + a[idx2]; + x0i = a[idx0 + 1] + a[idx2 + 1]; + x1r = a[idx0] - a[idx2]; + x1i = a[idx0 + 1] - a[idx2 + 1]; + y0r = a[idx0 - 2] + a[idx2 - 2]; + y0i = a[idx0 - 1] + a[idx2 - 1]; + y1r = a[idx0 - 2] - a[idx2 - 2]; + y1i = a[idx0 - 1] - a[idx2 - 1]; + x2r = a[idx1] + a[idx3]; + x2i = a[idx1 + 1] + a[idx3 + 1]; + x3r = a[idx1] - a[idx3]; + x3i = a[idx1 + 1] - a[idx3 + 1]; + y2r = a[idx1 - 2] + a[idx3 - 2]; + y2i = a[idx1 - 1] + a[idx3 - 1]; + y3r = a[idx1 - 2] - a[idx3 - 2]; + y3i = a[idx1 - 1] - a[idx3 - 1]; + a[idx0] = x0r + x2r; + a[idx0 + 1] = x0i + x2i; + a[idx0 - 2] = y0r + y2r; + a[idx0 - 1] = y0i + y2i; + a[idx1] = x0r - x2r; + a[idx1 + 1] = x0i - x2i; + a[idx1 - 2] = y0r - y2r; + a[idx1 - 1] = y0i - y2i; + x0r = x1r - x3i; + x0i = x1i + x3r; + a[idx2] = wk1i * x0r - wk1r * x0i; + a[idx2 + 1] = wk1i * x0i + wk1r * x0r; + x0r = y1r - y3i; + x0i = y1i + y3r; + a[idx2 - 2] = wd1i * x0r - wd1r * x0i; + a[idx2 - 1] = wd1i * x0i + wd1r * x0r; + x0r = x1r + x3i; + x0i = x1i - x3r; + a[idx3] = wk3i * x0r + wk3r * x0i; + a[idx3 + 1] = wk3i * x0i - wk3r * x0r; + x0r = y1r + y3i; + x0i = y1i - y3r; + a[offa + j3 - 2] = wd3i * x0r + wd3r * x0i; + a[offa + j3 - 1] = wd3i * x0i - wd3r * x0r; + } + wk1r = csc1 * (wd1r + wn4r); + wk1i = csc1 * (wd1i + wn4r); + wk3r = csc3 * (wd3r - wn4r); + wk3i = csc3 * (wd3i - wn4r); + j0 = mh; + j1 = j0 + m; + j2 = j1 + m; + j3 = j2 + m; + idx0 = offa + j0; + idx1 = offa + j1; + idx2 = offa + j2; + idx3 = offa + j3; + x0r = a[idx0 - 2] + a[idx2 - 2]; + x0i = a[idx0 - 1] + a[idx2 - 1]; + x1r = a[idx0 - 2] - a[idx2 - 2]; + x1i = a[idx0 - 1] - a[idx2 - 1]; + x2r = a[idx1 - 2] + a[idx3 - 2]; + x2i = a[idx1 - 1] + a[idx3 - 1]; + x3r = a[idx1 - 2] - a[idx3 - 2]; + x3i = a[idx1 - 1] - a[idx3 - 1]; + a[idx0 - 2] = x0r + x2r; + a[idx0 - 1] = x0i + x2i; + a[idx1 - 2] = x0r - x2r; + a[idx1 - 1] = x0i - x2i; + x0r = x1r - x3i; + x0i = x1i + x3r; + a[idx2 - 2] = wk1r * x0r - wk1i * x0i; + a[idx2 - 1] = wk1r * x0i + wk1i * x0r; + x0r = x1r + x3i; + x0i = x1i - x3r; + a[idx3 - 2] = wk3r * x0r + wk3i * x0i; + a[idx3 - 1] = wk3r * x0i - wk3i * x0r; + x0r = a[idx0] + a[idx2]; + x0i = a[idx0 + 1] + a[idx2 + 1]; + x1r = a[idx0] - a[idx2]; + x1i = a[idx0 + 1] - a[idx2 + 1]; + x2r = a[idx1] + a[idx3]; + x2i = a[idx1 + 1] + a[idx3 + 1]; + x3r = a[idx1] - a[idx3]; + x3i = a[idx1 + 1] - a[idx3 + 1]; + a[idx0] = x0r + x2r; + 
a[idx0 + 1] = x0i + x2i; + a[idx1] = x0r - x2r; + a[idx1 + 1] = x0i - x2i; + x0r = x1r - x3i; + x0i = x1i + x3r; + a[idx2] = wn4r * (x0r - x0i); + a[idx2 + 1] = wn4r * (x0i + x0r); + x0r = x1r + x3i; + x0i = x1i - x3r; + a[idx3] = -wn4r * (x0r + x0i); + a[idx3 + 1] = -wn4r * (x0i - x0r); + x0r = a[idx0 + 2] + a[idx2 + 2]; + x0i = a[idx0 + 3] + a[idx2 + 3]; + x1r = a[idx0 + 2] - a[idx2 + 2]; + x1i = a[idx0 + 3] - a[idx2 + 3]; + x2r = a[idx1 + 2] + a[idx3 + 2]; + x2i = a[idx1 + 3] + a[idx3 + 3]; + x3r = a[idx1 + 2] - a[idx3 + 2]; + x3i = a[idx1 + 3] - a[idx3 + 3]; + a[idx0 + 2] = x0r + x2r; + a[idx0 + 3] = x0i + x2i; + a[idx1 + 2] = x0r - x2r; + a[idx1 + 3] = x0i - x2i; + x0r = x1r - x3i; + x0i = x1i + x3r; + a[idx2 + 2] = wk1i * x0r - wk1r * x0i; + a[idx2 + 3] = wk1i * x0i + wk1r * x0r; + x0r = x1r + x3i; + x0i = x1i - x3r; + a[idx3 + 2] = wk3i * x0r + wk3r * x0i; + a[idx3 + 3] = wk3i * x0i - wk3r * x0r; + } + + private void cftb1st(int n, float[] a, int offa, float[] w, int startw) { + int j0, j1, j2, j3, k, m, mh; + float wn4r, csc1, csc3, wk1r, wk1i, wk3r, wk3i, wd1r, wd1i, wd3r, wd3i; + float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i, y0r, y0i, y1r, y1i, y2r, y2i, y3r, y3i; + int idx0, idx1, idx2, idx3, idx4, idx5; + mh = n >> 3; + m = 2 * mh; + j1 = m; + j2 = j1 + m; + j3 = j2 + m; + idx1 = offa + j1; + idx2 = offa + j2; + idx3 = offa + j3; + + x0r = a[offa] + a[idx2]; + x0i = -a[offa + 1] - a[idx2 + 1]; + x1r = a[offa] - a[idx2]; + x1i = -a[offa + 1] + a[idx2 + 1]; + x2r = a[idx1] + a[idx3]; + x2i = a[idx1 + 1] + a[idx3 + 1]; + x3r = a[idx1] - a[idx3]; + x3i = a[idx1 + 1] - a[idx3 + 1]; + a[offa] = x0r + x2r; + a[offa + 1] = x0i - x2i; + a[idx1] = x0r - x2r; + a[idx1 + 1] = x0i + x2i; + a[idx2] = x1r + x3i; + a[idx2 + 1] = x1i + x3r; + a[idx3] = x1r - x3i; + a[idx3 + 1] = x1i - x3r; + wn4r = w[startw + 1]; + csc1 = w[startw + 2]; + csc3 = w[startw + 3]; + wd1r = 1; + wd1i = 0; + wd3r = 1; + wd3i = 0; + k = 0; + for (int j = 2; j < mh - 2; j += 4) { + k += 4; + idx4 = startw + k; + wk1r = csc1 * (wd1r + w[idx4]); + wk1i = csc1 * (wd1i + w[idx4 + 1]); + wk3r = csc3 * (wd3r + w[idx4 + 2]); + wk3i = csc3 * (wd3i + w[idx4 + 3]); + wd1r = w[idx4]; + wd1i = w[idx4 + 1]; + wd3r = w[idx4 + 2]; + wd3i = w[idx4 + 3]; + j1 = j + m; + j2 = j1 + m; + j3 = j2 + m; + idx1 = offa + j1; + idx2 = offa + j2; + idx3 = offa + j3; + idx5 = offa + j; + x0r = a[idx5] + a[idx2]; + x0i = -a[idx5 + 1] - a[idx2 + 1]; + x1r = a[idx5] - a[offa + j2]; + x1i = -a[idx5 + 1] + a[idx2 + 1]; + y0r = a[idx5 + 2] + a[idx2 + 2]; + y0i = -a[idx5 + 3] - a[idx2 + 3]; + y1r = a[idx5 + 2] - a[idx2 + 2]; + y1i = -a[idx5 + 3] + a[idx2 + 3]; + x2r = a[idx1] + a[idx3]; + x2i = a[idx1 + 1] + a[idx3 + 1]; + x3r = a[idx1] - a[idx3]; + x3i = a[idx1 + 1] - a[idx3 + 1]; + y2r = a[idx1 + 2] + a[idx3 + 2]; + y2i = a[idx1 + 3] + a[idx3 + 3]; + y3r = a[idx1 + 2] - a[idx3 + 2]; + y3i = a[idx1 + 3] - a[idx3 + 3]; + a[idx5] = x0r + x2r; + a[idx5 + 1] = x0i - x2i; + a[idx5 + 2] = y0r + y2r; + a[idx5 + 3] = y0i - y2i; + a[idx1] = x0r - x2r; + a[idx1 + 1] = x0i + x2i; + a[idx1 + 2] = y0r - y2r; + a[idx1 + 3] = y0i + y2i; + x0r = x1r + x3i; + x0i = x1i + x3r; + a[idx2] = wk1r * x0r - wk1i * x0i; + a[idx2 + 1] = wk1r * x0i + wk1i * x0r; + x0r = y1r + y3i; + x0i = y1i + y3r; + a[idx2 + 2] = wd1r * x0r - wd1i * x0i; + a[idx2 + 3] = wd1r * x0i + wd1i * x0r; + x0r = x1r - x3i; + x0i = x1i - x3r; + a[idx3] = wk3r * x0r + wk3i * x0i; + a[idx3 + 1] = wk3r * x0i - wk3i * x0r; + x0r = y1r - y3i; + x0i = y1i - y3r; + a[idx3 + 2] = wd3r * x0r + wd3i * x0i; + 
a[idx3 + 3] = wd3r * x0i - wd3i * x0r; + j0 = m - j; + j1 = j0 + m; + j2 = j1 + m; + j3 = j2 + m; + idx0 = offa + j0; + idx1 = offa + j1; + idx2 = offa + j2; + idx3 = offa + j3; + x0r = a[idx0] + a[idx2]; + x0i = -a[idx0 + 1] - a[idx2 + 1]; + x1r = a[idx0] - a[idx2]; + x1i = -a[idx0 + 1] + a[idx2 + 1]; + y0r = a[idx0 - 2] + a[idx2 - 2]; + y0i = -a[idx0 - 1] - a[idx2 - 1]; + y1r = a[idx0 - 2] - a[idx2 - 2]; + y1i = -a[idx0 - 1] + a[idx2 - 1]; + x2r = a[idx1] + a[idx3]; + x2i = a[idx1 + 1] + a[idx3 + 1]; + x3r = a[idx1] - a[idx3]; + x3i = a[idx1 + 1] - a[idx3 + 1]; + y2r = a[idx1 - 2] + a[idx3 - 2]; + y2i = a[idx1 - 1] + a[idx3 - 1]; + y3r = a[idx1 - 2] - a[idx3 - 2]; + y3i = a[idx1 - 1] - a[idx3 - 1]; + a[idx0] = x0r + x2r; + a[idx0 + 1] = x0i - x2i; + a[idx0 - 2] = y0r + y2r; + a[idx0 - 1] = y0i - y2i; + a[idx1] = x0r - x2r; + a[idx1 + 1] = x0i + x2i; + a[idx1 - 2] = y0r - y2r; + a[idx1 - 1] = y0i + y2i; + x0r = x1r + x3i; + x0i = x1i + x3r; + a[idx2] = wk1i * x0r - wk1r * x0i; + a[idx2 + 1] = wk1i * x0i + wk1r * x0r; + x0r = y1r + y3i; + x0i = y1i + y3r; + a[idx2 - 2] = wd1i * x0r - wd1r * x0i; + a[idx2 - 1] = wd1i * x0i + wd1r * x0r; + x0r = x1r - x3i; + x0i = x1i - x3r; + a[idx3] = wk3i * x0r + wk3r * x0i; + a[idx3 + 1] = wk3i * x0i - wk3r * x0r; + x0r = y1r - y3i; + x0i = y1i - y3r; + a[idx3 - 2] = wd3i * x0r + wd3r * x0i; + a[idx3 - 1] = wd3i * x0i - wd3r * x0r; + } + wk1r = csc1 * (wd1r + wn4r); + wk1i = csc1 * (wd1i + wn4r); + wk3r = csc3 * (wd3r - wn4r); + wk3i = csc3 * (wd3i - wn4r); + j0 = mh; + j1 = j0 + m; + j2 = j1 + m; + j3 = j2 + m; + idx0 = offa + j0; + idx1 = offa + j1; + idx2 = offa + j2; + idx3 = offa + j3; + x0r = a[idx0 - 2] + a[idx2 - 2]; + x0i = -a[idx0 - 1] - a[idx2 - 1]; + x1r = a[idx0 - 2] - a[idx2 - 2]; + x1i = -a[idx0 - 1] + a[idx2 - 1]; + x2r = a[idx1 - 2] + a[idx3 - 2]; + x2i = a[idx1 - 1] + a[idx3 - 1]; + x3r = a[idx1 - 2] - a[idx3 - 2]; + x3i = a[idx1 - 1] - a[idx3 - 1]; + a[idx0 - 2] = x0r + x2r; + a[idx0 - 1] = x0i - x2i; + a[idx1 - 2] = x0r - x2r; + a[idx1 - 1] = x0i + x2i; + x0r = x1r + x3i; + x0i = x1i + x3r; + a[idx2 - 2] = wk1r * x0r - wk1i * x0i; + a[idx2 - 1] = wk1r * x0i + wk1i * x0r; + x0r = x1r - x3i; + x0i = x1i - x3r; + a[idx3 - 2] = wk3r * x0r + wk3i * x0i; + a[idx3 - 1] = wk3r * x0i - wk3i * x0r; + x0r = a[idx0] + a[idx2]; + x0i = -a[idx0 + 1] - a[idx2 + 1]; + x1r = a[idx0] - a[idx2]; + x1i = -a[idx0 + 1] + a[idx2 + 1]; + x2r = a[idx1] + a[idx3]; + x2i = a[idx1 + 1] + a[idx3 + 1]; + x3r = a[idx1] - a[idx3]; + x3i = a[idx1 + 1] - a[idx3 + 1]; + a[idx0] = x0r + x2r; + a[idx0 + 1] = x0i - x2i; + a[idx1] = x0r - x2r; + a[idx1 + 1] = x0i + x2i; + x0r = x1r + x3i; + x0i = x1i + x3r; + a[idx2] = wn4r * (x0r - x0i); + a[idx2 + 1] = wn4r * (x0i + x0r); + x0r = x1r - x3i; + x0i = x1i - x3r; + a[idx3] = -wn4r * (x0r + x0i); + a[idx3 + 1] = -wn4r * (x0i - x0r); + x0r = a[idx0 + 2] + a[idx2 + 2]; + x0i = -a[idx0 + 3] - a[idx2 + 3]; + x1r = a[idx0 + 2] - a[idx2 + 2]; + x1i = -a[idx0 + 3] + a[idx2 + 3]; + x2r = a[idx1 + 2] + a[idx3 + 2]; + x2i = a[idx1 + 3] + a[idx3 + 3]; + x3r = a[idx1 + 2] - a[idx3 + 2]; + x3i = a[idx1 + 3] - a[idx3 + 3]; + a[idx0 + 2] = x0r + x2r; + a[idx0 + 3] = x0i - x2i; + a[idx1 + 2] = x0r - x2r; + a[idx1 + 3] = x0i + x2i; + x0r = x1r + x3i; + x0i = x1i + x3r; + a[idx2 + 2] = wk1i * x0r - wk1r * x0i; + a[idx2 + 3] = wk1i * x0i + wk1r * x0r; + x0r = x1r - x3i; + x0i = x1i - x3r; + a[idx3 + 2] = wk3i * x0r + wk3r * x0i; + a[idx3 + 3] = wk3i * x0i - wk3r * x0r; + } + + private void cftrec4_th(final int n, final float[] a, final int 
offa, final int nw, final float[] w) { + int i; + int idiv4, m, nthreads; + int idx = 0; + nthreads = 2; + idiv4 = 0; + m = n >> 1; + if (n > ConcurrencyUtils.getThreadsBeginN_1D_FFT_4Threads()) { + nthreads = 4; + idiv4 = 1; + m >>= 1; + } + Future[] futures = new Future[nthreads]; + final int mf = m; + for (i = 0; i < nthreads; i++) { + final int firstIdx = offa + i * m; + if (i != idiv4) { + futures[idx++] = ConcurrencyUtils.submit(new Runnable() { + public void run() { + int isplt, j, k, m; + int idx1 = firstIdx + mf; + m = n; + while (m > 512) { + m >>= 2; + cftmdl1(m, a, idx1 - m, w, nw - (m >> 1)); + } + cftleaf(m, 1, a, idx1 - m, nw, w); + k = 0; + int idx2 = firstIdx - m; + for (j = mf - m; j > 0; j -= m) { + k++; + isplt = cfttree(m, j, k, a, firstIdx, nw, w); + cftleaf(m, isplt, a, idx2 + j, nw, w); + } + } + }); + } else { + futures[idx++] = ConcurrencyUtils.submit(new Runnable() { + public void run() { + int isplt, j, k, m; + int idx1 = firstIdx + mf; + k = 1; + m = n; + while (m > 512) { + m >>= 2; + k <<= 2; + cftmdl2(m, a, idx1 - m, w, nw - m); + } + cftleaf(m, 0, a, idx1 - m, nw, w); + k >>= 1; + int idx2 = firstIdx - m; + for (j = mf - m; j > 0; j -= m) { + k++; + isplt = cfttree(m, j, k, a, firstIdx, nw, w); + cftleaf(m, isplt, a, idx2 + j, nw, w); + } + } + }); + } + } + ConcurrencyUtils.waitForCompletion(futures); + } + + private void cftrec4(int n, float[] a, int offa, int nw, float[] w) { + int isplt, j, k, m; + + m = n; + int idx1 = offa + n; + while (m > 512) { + m >>= 2; + cftmdl1(m, a, idx1 - m, w, nw - (m >> 1)); + } + cftleaf(m, 1, a, idx1 - m, nw, w); + k = 0; + int idx2 = offa - m; + for (j = n - m; j > 0; j -= m) { + k++; + isplt = cfttree(m, j, k, a, offa, nw, w); + cftleaf(m, isplt, a, idx2 + j, nw, w); + } + } + + private int cfttree(int n, int j, int k, float[] a, int offa, int nw, float[] w) { + int i, isplt, m; + int idx1 = offa - n; + if ((k & 3) != 0) { + isplt = k & 1; + if (isplt != 0) { + cftmdl1(n, a, idx1 + j, w, nw - (n >> 1)); + } else { + cftmdl2(n, a, idx1 + j, w, nw - n); + } + } else { + m = n; + for (i = k; (i & 3) == 0; i >>= 2) { + m <<= 2; + } + isplt = i & 1; + int idx2 = offa + j; + if (isplt != 0) { + while (m > 128) { + cftmdl1(m, a, idx2 - m, w, nw - (m >> 1)); + m >>= 2; + } + } else { + while (m > 128) { + cftmdl2(m, a, idx2 - m, w, nw - m); + m >>= 2; + } + } + } + return isplt; + } + + private void cftleaf(int n, int isplt, float[] a, int offa, int nw, float[] w) { + if (n == 512) { + cftmdl1(128, a, offa, w, nw - 64); + cftf161(a, offa, w, nw - 8); + cftf162(a, offa + 32, w, nw - 32); + cftf161(a, offa + 64, w, nw - 8); + cftf161(a, offa + 96, w, nw - 8); + cftmdl2(128, a, offa + 128, w, nw - 128); + cftf161(a, offa + 128, w, nw - 8); + cftf162(a, offa + 160, w, nw - 32); + cftf161(a, offa + 192, w, nw - 8); + cftf162(a, offa + 224, w, nw - 32); + cftmdl1(128, a, offa + 256, w, nw - 64); + cftf161(a, offa + 256, w, nw - 8); + cftf162(a, offa + 288, w, nw - 32); + cftf161(a, offa + 320, w, nw - 8); + cftf161(a, offa + 352, w, nw - 8); + if (isplt != 0) { + cftmdl1(128, a, offa + 384, w, nw - 64); + cftf161(a, offa + 480, w, nw - 8); + } else { + cftmdl2(128, a, offa + 384, w, nw - 128); + cftf162(a, offa + 480, w, nw - 32); + } + cftf161(a, offa + 384, w, nw - 8); + cftf162(a, offa + 416, w, nw - 32); + cftf161(a, offa + 448, w, nw - 8); + } else { + cftmdl1(64, a, offa, w, nw - 32); + cftf081(a, offa, w, nw - 8); + cftf082(a, offa + 16, w, nw - 8); + cftf081(a, offa + 32, w, nw - 8); + cftf081(a, offa + 48, w, nw - 8); + 
cftmdl2(64, a, offa + 64, w, nw - 64); + cftf081(a, offa + 64, w, nw - 8); + cftf082(a, offa + 80, w, nw - 8); + cftf081(a, offa + 96, w, nw - 8); + cftf082(a, offa + 112, w, nw - 8); + cftmdl1(64, a, offa + 128, w, nw - 32); + cftf081(a, offa + 128, w, nw - 8); + cftf082(a, offa + 144, w, nw - 8); + cftf081(a, offa + 160, w, nw - 8); + cftf081(a, offa + 176, w, nw - 8); + if (isplt != 0) { + cftmdl1(64, a, offa + 192, w, nw - 32); + cftf081(a, offa + 240, w, nw - 8); + } else { + cftmdl2(64, a, offa + 192, w, nw - 64); + cftf082(a, offa + 240, w, nw - 8); + } + cftf081(a, offa + 192, w, nw - 8); + cftf082(a, offa + 208, w, nw - 8); + cftf081(a, offa + 224, w, nw - 8); + } + } + + private void cftmdl1(int n, float[] a, int offa, float[] w, int startw) { + int j0, j1, j2, j3, k, m, mh; + float wn4r, wk1r, wk1i, wk3r, wk3i; + float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i; + int idx0, idx1, idx2, idx3, idx4, idx5; + + mh = n >> 3; + m = 2 * mh; + j1 = m; + j2 = j1 + m; + j3 = j2 + m; + idx1 = offa + j1; + idx2 = offa + j2; + idx3 = offa + j3; + x0r = a[offa] + a[idx2]; + x0i = a[offa + 1] + a[idx2 + 1]; + x1r = a[offa] - a[idx2]; + x1i = a[offa + 1] - a[idx2 + 1]; + x2r = a[idx1] + a[idx3]; + x2i = a[idx1 + 1] + a[idx3 + 1]; + x3r = a[idx1] - a[idx3]; + x3i = a[idx1 + 1] - a[idx3 + 1]; + a[offa] = x0r + x2r; + a[offa + 1] = x0i + x2i; + a[idx1] = x0r - x2r; + a[idx1 + 1] = x0i - x2i; + a[idx2] = x1r - x3i; + a[idx2 + 1] = x1i + x3r; + a[idx3] = x1r + x3i; + a[idx3 + 1] = x1i - x3r; + wn4r = w[startw + 1]; + k = 0; + for (int j = 2; j < mh; j += 2) { + k += 4; + idx4 = startw + k; + wk1r = w[idx4]; + wk1i = w[idx4 + 1]; + wk3r = w[idx4 + 2]; + wk3i = w[idx4 + 3]; + j1 = j + m; + j2 = j1 + m; + j3 = j2 + m; + idx1 = offa + j1; + idx2 = offa + j2; + idx3 = offa + j3; + idx5 = offa + j; + x0r = a[idx5] + a[idx2]; + x0i = a[idx5 + 1] + a[idx2 + 1]; + x1r = a[idx5] - a[idx2]; + x1i = a[idx5 + 1] - a[idx2 + 1]; + x2r = a[idx1] + a[idx3]; + x2i = a[idx1 + 1] + a[idx3 + 1]; + x3r = a[idx1] - a[idx3]; + x3i = a[idx1 + 1] - a[idx3 + 1]; + a[idx5] = x0r + x2r; + a[idx5 + 1] = x0i + x2i; + a[idx1] = x0r - x2r; + a[idx1 + 1] = x0i - x2i; + x0r = x1r - x3i; + x0i = x1i + x3r; + a[idx2] = wk1r * x0r - wk1i * x0i; + a[idx2 + 1] = wk1r * x0i + wk1i * x0r; + x0r = x1r + x3i; + x0i = x1i - x3r; + a[idx3] = wk3r * x0r + wk3i * x0i; + a[idx3 + 1] = wk3r * x0i - wk3i * x0r; + j0 = m - j; + j1 = j0 + m; + j2 = j1 + m; + j3 = j2 + m; + idx0 = offa + j0; + idx1 = offa + j1; + idx2 = offa + j2; + idx3 = offa + j3; + x0r = a[idx0] + a[idx2]; + x0i = a[idx0 + 1] + a[idx2 + 1]; + x1r = a[idx0] - a[idx2]; + x1i = a[idx0 + 1] - a[idx2 + 1]; + x2r = a[idx1] + a[idx3]; + x2i = a[idx1 + 1] + a[idx3 + 1]; + x3r = a[idx1] - a[idx3]; + x3i = a[idx1 + 1] - a[idx3 + 1]; + a[idx0] = x0r + x2r; + a[idx0 + 1] = x0i + x2i; + a[idx1] = x0r - x2r; + a[idx1 + 1] = x0i - x2i; + x0r = x1r - x3i; + x0i = x1i + x3r; + a[idx2] = wk1i * x0r - wk1r * x0i; + a[idx2 + 1] = wk1i * x0i + wk1r * x0r; + x0r = x1r + x3i; + x0i = x1i - x3r; + a[idx3] = wk3i * x0r + wk3r * x0i; + a[idx3 + 1] = wk3i * x0i - wk3r * x0r; + } + j0 = mh; + j1 = j0 + m; + j2 = j1 + m; + j3 = j2 + m; + idx0 = offa + j0; + idx1 = offa + j1; + idx2 = offa + j2; + idx3 = offa + j3; + x0r = a[idx0] + a[idx2]; + x0i = a[idx0 + 1] + a[idx2 + 1]; + x1r = a[idx0] - a[idx2]; + x1i = a[idx0 + 1] - a[idx2 + 1]; + x2r = a[idx1] + a[idx3]; + x2i = a[idx1 + 1] + a[idx3 + 1]; + x3r = a[idx1] - a[idx3]; + x3i = a[idx1 + 1] - a[idx3 + 1]; + a[idx0] = x0r + x2r; + a[idx0 + 1] = x0i + x2i; + 
a[idx1] = x0r - x2r; + a[idx1 + 1] = x0i - x2i; + x0r = x1r - x3i; + x0i = x1i + x3r; + a[idx2] = wn4r * (x0r - x0i); + a[idx2 + 1] = wn4r * (x0i + x0r); + x0r = x1r + x3i; + x0i = x1i - x3r; + a[idx3] = -wn4r * (x0r + x0i); + a[idx3 + 1] = -wn4r * (x0i - x0r); + } + + private void cftmdl2(int n, float[] a, int offa, float[] w, int startw) { + int j0, j1, j2, j3, k, kr, m, mh; + float wn4r, wk1r, wk1i, wk3r, wk3i, wd1r, wd1i, wd3r, wd3i; + float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i, y0r, y0i, y2r, y2i; + int idx0, idx1, idx2, idx3, idx4, idx5, idx6; + + mh = n >> 3; + m = 2 * mh; + wn4r = w[startw + 1]; + j1 = m; + j2 = j1 + m; + j3 = j2 + m; + idx1 = offa + j1; + idx2 = offa + j2; + idx3 = offa + j3; + x0r = a[offa] - a[idx2 + 1]; + x0i = a[offa + 1] + a[idx2]; + x1r = a[offa] + a[idx2 + 1]; + x1i = a[offa + 1] - a[idx2]; + x2r = a[idx1] - a[idx3 + 1]; + x2i = a[idx1 + 1] + a[idx3]; + x3r = a[idx1] + a[idx3 + 1]; + x3i = a[idx1 + 1] - a[idx3]; + y0r = wn4r * (x2r - x2i); + y0i = wn4r * (x2i + x2r); + a[offa] = x0r + y0r; + a[offa + 1] = x0i + y0i; + a[idx1] = x0r - y0r; + a[idx1 + 1] = x0i - y0i; + y0r = wn4r * (x3r - x3i); + y0i = wn4r * (x3i + x3r); + a[idx2] = x1r - y0i; + a[idx2 + 1] = x1i + y0r; + a[idx3] = x1r + y0i; + a[idx3 + 1] = x1i - y0r; + k = 0; + kr = 2 * m; + for (int j = 2; j < mh; j += 2) { + k += 4; + idx4 = startw + k; + wk1r = w[idx4]; + wk1i = w[idx4 + 1]; + wk3r = w[idx4 + 2]; + wk3i = w[idx4 + 3]; + kr -= 4; + idx5 = startw + kr; + wd1i = w[idx5]; + wd1r = w[idx5 + 1]; + wd3i = w[idx5 + 2]; + wd3r = w[idx5 + 3]; + j1 = j + m; + j2 = j1 + m; + j3 = j2 + m; + idx1 = offa + j1; + idx2 = offa + j2; + idx3 = offa + j3; + idx6 = offa + j; + x0r = a[idx6] - a[idx2 + 1]; + x0i = a[idx6 + 1] + a[idx2]; + x1r = a[idx6] + a[idx2 + 1]; + x1i = a[idx6 + 1] - a[idx2]; + x2r = a[idx1] - a[idx3 + 1]; + x2i = a[idx1 + 1] + a[idx3]; + x3r = a[idx1] + a[idx3 + 1]; + x3i = a[idx1 + 1] - a[idx3]; + y0r = wk1r * x0r - wk1i * x0i; + y0i = wk1r * x0i + wk1i * x0r; + y2r = wd1r * x2r - wd1i * x2i; + y2i = wd1r * x2i + wd1i * x2r; + a[idx6] = y0r + y2r; + a[idx6 + 1] = y0i + y2i; + a[idx1] = y0r - y2r; + a[idx1 + 1] = y0i - y2i; + y0r = wk3r * x1r + wk3i * x1i; + y0i = wk3r * x1i - wk3i * x1r; + y2r = wd3r * x3r + wd3i * x3i; + y2i = wd3r * x3i - wd3i * x3r; + a[idx2] = y0r + y2r; + a[idx2 + 1] = y0i + y2i; + a[idx3] = y0r - y2r; + a[idx3 + 1] = y0i - y2i; + j0 = m - j; + j1 = j0 + m; + j2 = j1 + m; + j3 = j2 + m; + idx0 = offa + j0; + idx1 = offa + j1; + idx2 = offa + j2; + idx3 = offa + j3; + x0r = a[idx0] - a[idx2 + 1]; + x0i = a[idx0 + 1] + a[idx2]; + x1r = a[idx0] + a[idx2 + 1]; + x1i = a[idx0 + 1] - a[idx2]; + x2r = a[idx1] - a[idx3 + 1]; + x2i = a[idx1 + 1] + a[idx3]; + x3r = a[idx1] + a[idx3 + 1]; + x3i = a[idx1 + 1] - a[idx3]; + y0r = wd1i * x0r - wd1r * x0i; + y0i = wd1i * x0i + wd1r * x0r; + y2r = wk1i * x2r - wk1r * x2i; + y2i = wk1i * x2i + wk1r * x2r; + a[idx0] = y0r + y2r; + a[idx0 + 1] = y0i + y2i; + a[idx1] = y0r - y2r; + a[idx1 + 1] = y0i - y2i; + y0r = wd3i * x1r + wd3r * x1i; + y0i = wd3i * x1i - wd3r * x1r; + y2r = wk3i * x3r + wk3r * x3i; + y2i = wk3i * x3i - wk3r * x3r; + a[idx2] = y0r + y2r; + a[idx2 + 1] = y0i + y2i; + a[idx3] = y0r - y2r; + a[idx3 + 1] = y0i - y2i; + } + wk1r = w[startw + m]; + wk1i = w[startw + m + 1]; + j0 = mh; + j1 = j0 + m; + j2 = j1 + m; + j3 = j2 + m; + idx0 = offa + j0; + idx1 = offa + j1; + idx2 = offa + j2; + idx3 = offa + j3; + x0r = a[idx0] - a[idx2 + 1]; + x0i = a[idx0 + 1] + a[idx2]; + x1r = a[idx0] + a[idx2 + 1]; + x1i = a[idx0 + 1] 
- a[idx2]; + x2r = a[idx1] - a[idx3 + 1]; + x2i = a[idx1 + 1] + a[idx3]; + x3r = a[idx1] + a[idx3 + 1]; + x3i = a[idx1 + 1] - a[idx3]; + y0r = wk1r * x0r - wk1i * x0i; + y0i = wk1r * x0i + wk1i * x0r; + y2r = wk1i * x2r - wk1r * x2i; + y2i = wk1i * x2i + wk1r * x2r; + a[idx0] = y0r + y2r; + a[idx0 + 1] = y0i + y2i; + a[idx1] = y0r - y2r; + a[idx1 + 1] = y0i - y2i; + y0r = wk1i * x1r - wk1r * x1i; + y0i = wk1i * x1i + wk1r * x1r; + y2r = wk1r * x3r - wk1i * x3i; + y2i = wk1r * x3i + wk1i * x3r; + a[idx2] = y0r - y2r; + a[idx2 + 1] = y0i - y2i; + a[idx3] = y0r + y2r; + a[idx3 + 1] = y0i + y2i; + } + + private void cftfx41(int n, float[] a, int offa, int nw, float[] w) { + if (n == 128) { + cftf161(a, offa, w, nw - 8); + cftf162(a, offa + 32, w, nw - 32); + cftf161(a, offa + 64, w, nw - 8); + cftf161(a, offa + 96, w, nw - 8); + } else { + cftf081(a, offa, w, nw - 8); + cftf082(a, offa + 16, w, nw - 8); + cftf081(a, offa + 32, w, nw - 8); + cftf081(a, offa + 48, w, nw - 8); + } + } + + private void cftf161(float[] a, int offa, float[] w, int startw) { + float wn4r, wk1r, wk1i, x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i, y0r, y0i, y1r, y1i, y2r, y2i, y3r, y3i, y4r, y4i, y5r, y5i, y6r, y6i, y7r, y7i, y8r, y8i, y9r, y9i, y10r, y10i, y11r, y11i, y12r, y12i, y13r, y13i, y14r, y14i, y15r, y15i; + + wn4r = w[startw + 1]; + wk1r = w[startw + 2]; + wk1i = w[startw + 3]; + + x0r = a[offa] + a[offa + 16]; + x0i = a[offa + 1] + a[offa + 17]; + x1r = a[offa] - a[offa + 16]; + x1i = a[offa + 1] - a[offa + 17]; + x2r = a[offa + 8] + a[offa + 24]; + x2i = a[offa + 9] + a[offa + 25]; + x3r = a[offa + 8] - a[offa + 24]; + x3i = a[offa + 9] - a[offa + 25]; + y0r = x0r + x2r; + y0i = x0i + x2i; + y4r = x0r - x2r; + y4i = x0i - x2i; + y8r = x1r - x3i; + y8i = x1i + x3r; + y12r = x1r + x3i; + y12i = x1i - x3r; + x0r = a[offa + 2] + a[offa + 18]; + x0i = a[offa + 3] + a[offa + 19]; + x1r = a[offa + 2] - a[offa + 18]; + x1i = a[offa + 3] - a[offa + 19]; + x2r = a[offa + 10] + a[offa + 26]; + x2i = a[offa + 11] + a[offa + 27]; + x3r = a[offa + 10] - a[offa + 26]; + x3i = a[offa + 11] - a[offa + 27]; + y1r = x0r + x2r; + y1i = x0i + x2i; + y5r = x0r - x2r; + y5i = x0i - x2i; + x0r = x1r - x3i; + x0i = x1i + x3r; + y9r = wk1r * x0r - wk1i * x0i; + y9i = wk1r * x0i + wk1i * x0r; + x0r = x1r + x3i; + x0i = x1i - x3r; + y13r = wk1i * x0r - wk1r * x0i; + y13i = wk1i * x0i + wk1r * x0r; + x0r = a[offa + 4] + a[offa + 20]; + x0i = a[offa + 5] + a[offa + 21]; + x1r = a[offa + 4] - a[offa + 20]; + x1i = a[offa + 5] - a[offa + 21]; + x2r = a[offa + 12] + a[offa + 28]; + x2i = a[offa + 13] + a[offa + 29]; + x3r = a[offa + 12] - a[offa + 28]; + x3i = a[offa + 13] - a[offa + 29]; + y2r = x0r + x2r; + y2i = x0i + x2i; + y6r = x0r - x2r; + y6i = x0i - x2i; + x0r = x1r - x3i; + x0i = x1i + x3r; + y10r = wn4r * (x0r - x0i); + y10i = wn4r * (x0i + x0r); + x0r = x1r + x3i; + x0i = x1i - x3r; + y14r = wn4r * (x0r + x0i); + y14i = wn4r * (x0i - x0r); + x0r = a[offa + 6] + a[offa + 22]; + x0i = a[offa + 7] + a[offa + 23]; + x1r = a[offa + 6] - a[offa + 22]; + x1i = a[offa + 7] - a[offa + 23]; + x2r = a[offa + 14] + a[offa + 30]; + x2i = a[offa + 15] + a[offa + 31]; + x3r = a[offa + 14] - a[offa + 30]; + x3i = a[offa + 15] - a[offa + 31]; + y3r = x0r + x2r; + y3i = x0i + x2i; + y7r = x0r - x2r; + y7i = x0i - x2i; + x0r = x1r - x3i; + x0i = x1i + x3r; + y11r = wk1i * x0r - wk1r * x0i; + y11i = wk1i * x0i + wk1r * x0r; + x0r = x1r + x3i; + x0i = x1i - x3r; + y15r = wk1r * x0r - wk1i * x0i; + y15i = wk1r * x0i + wk1i * x0r; + x0r = y12r - y14r; + 
x0i = y12i - y14i; + x1r = y12r + y14r; + x1i = y12i + y14i; + x2r = y13r - y15r; + x2i = y13i - y15i; + x3r = y13r + y15r; + x3i = y13i + y15i; + a[offa + 24] = x0r + x2r; + a[offa + 25] = x0i + x2i; + a[offa + 26] = x0r - x2r; + a[offa + 27] = x0i - x2i; + a[offa + 28] = x1r - x3i; + a[offa + 29] = x1i + x3r; + a[offa + 30] = x1r + x3i; + a[offa + 31] = x1i - x3r; + x0r = y8r + y10r; + x0i = y8i + y10i; + x1r = y8r - y10r; + x1i = y8i - y10i; + x2r = y9r + y11r; + x2i = y9i + y11i; + x3r = y9r - y11r; + x3i = y9i - y11i; + a[offa + 16] = x0r + x2r; + a[offa + 17] = x0i + x2i; + a[offa + 18] = x0r - x2r; + a[offa + 19] = x0i - x2i; + a[offa + 20] = x1r - x3i; + a[offa + 21] = x1i + x3r; + a[offa + 22] = x1r + x3i; + a[offa + 23] = x1i - x3r; + x0r = y5r - y7i; + x0i = y5i + y7r; + x2r = wn4r * (x0r - x0i); + x2i = wn4r * (x0i + x0r); + x0r = y5r + y7i; + x0i = y5i - y7r; + x3r = wn4r * (x0r - x0i); + x3i = wn4r * (x0i + x0r); + x0r = y4r - y6i; + x0i = y4i + y6r; + x1r = y4r + y6i; + x1i = y4i - y6r; + a[offa + 8] = x0r + x2r; + a[offa + 9] = x0i + x2i; + a[offa + 10] = x0r - x2r; + a[offa + 11] = x0i - x2i; + a[offa + 12] = x1r - x3i; + a[offa + 13] = x1i + x3r; + a[offa + 14] = x1r + x3i; + a[offa + 15] = x1i - x3r; + x0r = y0r + y2r; + x0i = y0i + y2i; + x1r = y0r - y2r; + x1i = y0i - y2i; + x2r = y1r + y3r; + x2i = y1i + y3i; + x3r = y1r - y3r; + x3i = y1i - y3i; + a[offa] = x0r + x2r; + a[offa + 1] = x0i + x2i; + a[offa + 2] = x0r - x2r; + a[offa + 3] = x0i - x2i; + a[offa + 4] = x1r - x3i; + a[offa + 5] = x1i + x3r; + a[offa + 6] = x1r + x3i; + a[offa + 7] = x1i - x3r; + } + + private void cftf162(float[] a, int offa, float[] w, int startw) { + float wn4r, wk1r, wk1i, wk2r, wk2i, wk3r, wk3i, x0r, x0i, x1r, x1i, x2r, x2i, y0r, y0i, y1r, y1i, y2r, y2i, y3r, y3i, y4r, y4i, y5r, y5i, y6r, y6i, y7r, y7i, y8r, y8i, y9r, y9i, y10r, y10i, y11r, y11i, y12r, y12i, y13r, y13i, y14r, y14i, y15r, y15i; + + wn4r = w[startw + 1]; + wk1r = w[startw + 4]; + wk1i = w[startw + 5]; + wk3r = w[startw + 6]; + wk3i = -w[startw + 7]; + wk2r = w[startw + 8]; + wk2i = w[startw + 9]; + x1r = a[offa] - a[offa + 17]; + x1i = a[offa + 1] + a[offa + 16]; + x0r = a[offa + 8] - a[offa + 25]; + x0i = a[offa + 9] + a[offa + 24]; + x2r = wn4r * (x0r - x0i); + x2i = wn4r * (x0i + x0r); + y0r = x1r + x2r; + y0i = x1i + x2i; + y4r = x1r - x2r; + y4i = x1i - x2i; + x1r = a[offa] + a[offa + 17]; + x1i = a[offa + 1] - a[offa + 16]; + x0r = a[offa + 8] + a[offa + 25]; + x0i = a[offa + 9] - a[offa + 24]; + x2r = wn4r * (x0r - x0i); + x2i = wn4r * (x0i + x0r); + y8r = x1r - x2i; + y8i = x1i + x2r; + y12r = x1r + x2i; + y12i = x1i - x2r; + x0r = a[offa + 2] - a[offa + 19]; + x0i = a[offa + 3] + a[offa + 18]; + x1r = wk1r * x0r - wk1i * x0i; + x1i = wk1r * x0i + wk1i * x0r; + x0r = a[offa + 10] - a[offa + 27]; + x0i = a[offa + 11] + a[offa + 26]; + x2r = wk3i * x0r - wk3r * x0i; + x2i = wk3i * x0i + wk3r * x0r; + y1r = x1r + x2r; + y1i = x1i + x2i; + y5r = x1r - x2r; + y5i = x1i - x2i; + x0r = a[offa + 2] + a[offa + 19]; + x0i = a[offa + 3] - a[offa + 18]; + x1r = wk3r * x0r - wk3i * x0i; + x1i = wk3r * x0i + wk3i * x0r; + x0r = a[offa + 10] + a[offa + 27]; + x0i = a[offa + 11] - a[offa + 26]; + x2r = wk1r * x0r + wk1i * x0i; + x2i = wk1r * x0i - wk1i * x0r; + y9r = x1r - x2r; + y9i = x1i - x2i; + y13r = x1r + x2r; + y13i = x1i + x2i; + x0r = a[offa + 4] - a[offa + 21]; + x0i = a[offa + 5] + a[offa + 20]; + x1r = wk2r * x0r - wk2i * x0i; + x1i = wk2r * x0i + wk2i * x0r; + x0r = a[offa + 12] - a[offa + 29]; + x0i = a[offa + 13] + 
a[offa + 28]; + x2r = wk2i * x0r - wk2r * x0i; + x2i = wk2i * x0i + wk2r * x0r; + y2r = x1r + x2r; + y2i = x1i + x2i; + y6r = x1r - x2r; + y6i = x1i - x2i; + x0r = a[offa + 4] + a[offa + 21]; + x0i = a[offa + 5] - a[offa + 20]; + x1r = wk2i * x0r - wk2r * x0i; + x1i = wk2i * x0i + wk2r * x0r; + x0r = a[offa + 12] + a[offa + 29]; + x0i = a[offa + 13] - a[offa + 28]; + x2r = wk2r * x0r - wk2i * x0i; + x2i = wk2r * x0i + wk2i * x0r; + y10r = x1r - x2r; + y10i = x1i - x2i; + y14r = x1r + x2r; + y14i = x1i + x2i; + x0r = a[offa + 6] - a[offa + 23]; + x0i = a[offa + 7] + a[offa + 22]; + x1r = wk3r * x0r - wk3i * x0i; + x1i = wk3r * x0i + wk3i * x0r; + x0r = a[offa + 14] - a[offa + 31]; + x0i = a[offa + 15] + a[offa + 30]; + x2r = wk1i * x0r - wk1r * x0i; + x2i = wk1i * x0i + wk1r * x0r; + y3r = x1r + x2r; + y3i = x1i + x2i; + y7r = x1r - x2r; + y7i = x1i - x2i; + x0r = a[offa + 6] + a[offa + 23]; + x0i = a[offa + 7] - a[offa + 22]; + x1r = wk1i * x0r + wk1r * x0i; + x1i = wk1i * x0i - wk1r * x0r; + x0r = a[offa + 14] + a[offa + 31]; + x0i = a[offa + 15] - a[offa + 30]; + x2r = wk3i * x0r - wk3r * x0i; + x2i = wk3i * x0i + wk3r * x0r; + y11r = x1r + x2r; + y11i = x1i + x2i; + y15r = x1r - x2r; + y15i = x1i - x2i; + x1r = y0r + y2r; + x1i = y0i + y2i; + x2r = y1r + y3r; + x2i = y1i + y3i; + a[offa] = x1r + x2r; + a[offa + 1] = x1i + x2i; + a[offa + 2] = x1r - x2r; + a[offa + 3] = x1i - x2i; + x1r = y0r - y2r; + x1i = y0i - y2i; + x2r = y1r - y3r; + x2i = y1i - y3i; + a[offa + 4] = x1r - x2i; + a[offa + 5] = x1i + x2r; + a[offa + 6] = x1r + x2i; + a[offa + 7] = x1i - x2r; + x1r = y4r - y6i; + x1i = y4i + y6r; + x0r = y5r - y7i; + x0i = y5i + y7r; + x2r = wn4r * (x0r - x0i); + x2i = wn4r * (x0i + x0r); + a[offa + 8] = x1r + x2r; + a[offa + 9] = x1i + x2i; + a[offa + 10] = x1r - x2r; + a[offa + 11] = x1i - x2i; + x1r = y4r + y6i; + x1i = y4i - y6r; + x0r = y5r + y7i; + x0i = y5i - y7r; + x2r = wn4r * (x0r - x0i); + x2i = wn4r * (x0i + x0r); + a[offa + 12] = x1r - x2i; + a[offa + 13] = x1i + x2r; + a[offa + 14] = x1r + x2i; + a[offa + 15] = x1i - x2r; + x1r = y8r + y10r; + x1i = y8i + y10i; + x2r = y9r - y11r; + x2i = y9i - y11i; + a[offa + 16] = x1r + x2r; + a[offa + 17] = x1i + x2i; + a[offa + 18] = x1r - x2r; + a[offa + 19] = x1i - x2i; + x1r = y8r - y10r; + x1i = y8i - y10i; + x2r = y9r + y11r; + x2i = y9i + y11i; + a[offa + 20] = x1r - x2i; + a[offa + 21] = x1i + x2r; + a[offa + 22] = x1r + x2i; + a[offa + 23] = x1i - x2r; + x1r = y12r - y14i; + x1i = y12i + y14r; + x0r = y13r + y15i; + x0i = y13i - y15r; + x2r = wn4r * (x0r - x0i); + x2i = wn4r * (x0i + x0r); + a[offa + 24] = x1r + x2r; + a[offa + 25] = x1i + x2i; + a[offa + 26] = x1r - x2r; + a[offa + 27] = x1i - x2i; + x1r = y12r + y14i; + x1i = y12i - y14r; + x0r = y13r - y15i; + x0i = y13i + y15r; + x2r = wn4r * (x0r - x0i); + x2i = wn4r * (x0i + x0r); + a[offa + 28] = x1r - x2i; + a[offa + 29] = x1i + x2r; + a[offa + 30] = x1r + x2i; + a[offa + 31] = x1i - x2r; + } + + private void cftf081(float[] a, int offa, float[] w, int startw) { + float wn4r, x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i, y0r, y0i, y1r, y1i, y2r, y2i, y3r, y3i, y4r, y4i, y5r, y5i, y6r, y6i, y7r, y7i; + + wn4r = w[startw + 1]; + x0r = a[offa] + a[offa + 8]; + x0i = a[offa + 1] + a[offa + 9]; + x1r = a[offa] - a[offa + 8]; + x1i = a[offa + 1] - a[offa + 9]; + x2r = a[offa + 4] + a[offa + 12]; + x2i = a[offa + 5] + a[offa + 13]; + x3r = a[offa + 4] - a[offa + 12]; + x3i = a[offa + 5] - a[offa + 13]; + y0r = x0r + x2r; + y0i = x0i + x2i; + y2r = x0r - x2r; + y2i = x0i - x2i; + 
y1r = x1r - x3i; + y1i = x1i + x3r; + y3r = x1r + x3i; + y3i = x1i - x3r; + x0r = a[offa + 2] + a[offa + 10]; + x0i = a[offa + 3] + a[offa + 11]; + x1r = a[offa + 2] - a[offa + 10]; + x1i = a[offa + 3] - a[offa + 11]; + x2r = a[offa + 6] + a[offa + 14]; + x2i = a[offa + 7] + a[offa + 15]; + x3r = a[offa + 6] - a[offa + 14]; + x3i = a[offa + 7] - a[offa + 15]; + y4r = x0r + x2r; + y4i = x0i + x2i; + y6r = x0r - x2r; + y6i = x0i - x2i; + x0r = x1r - x3i; + x0i = x1i + x3r; + x2r = x1r + x3i; + x2i = x1i - x3r; + y5r = wn4r * (x0r - x0i); + y5i = wn4r * (x0r + x0i); + y7r = wn4r * (x2r - x2i); + y7i = wn4r * (x2r + x2i); + a[offa + 8] = y1r + y5r; + a[offa + 9] = y1i + y5i; + a[offa + 10] = y1r - y5r; + a[offa + 11] = y1i - y5i; + a[offa + 12] = y3r - y7i; + a[offa + 13] = y3i + y7r; + a[offa + 14] = y3r + y7i; + a[offa + 15] = y3i - y7r; + a[offa] = y0r + y4r; + a[offa + 1] = y0i + y4i; + a[offa + 2] = y0r - y4r; + a[offa + 3] = y0i - y4i; + a[offa + 4] = y2r - y6i; + a[offa + 5] = y2i + y6r; + a[offa + 6] = y2r + y6i; + a[offa + 7] = y2i - y6r; + } + + private void cftf082(float[] a, int offa, float[] w, int startw) { + float wn4r, wk1r, wk1i, x0r, x0i, x1r, x1i, y0r, y0i, y1r, y1i, y2r, y2i, y3r, y3i, y4r, y4i, y5r, y5i, y6r, y6i, y7r, y7i; + + wn4r = w[startw + 1]; + wk1r = w[startw + 2]; + wk1i = w[startw + 3]; + y0r = a[offa] - a[offa + 9]; + y0i = a[offa + 1] + a[offa + 8]; + y1r = a[offa] + a[offa + 9]; + y1i = a[offa + 1] - a[offa + 8]; + x0r = a[offa + 4] - a[offa + 13]; + x0i = a[offa + 5] + a[offa + 12]; + y2r = wn4r * (x0r - x0i); + y2i = wn4r * (x0i + x0r); + x0r = a[offa + 4] + a[offa + 13]; + x0i = a[offa + 5] - a[offa + 12]; + y3r = wn4r * (x0r - x0i); + y3i = wn4r * (x0i + x0r); + x0r = a[offa + 2] - a[offa + 11]; + x0i = a[offa + 3] + a[offa + 10]; + y4r = wk1r * x0r - wk1i * x0i; + y4i = wk1r * x0i + wk1i * x0r; + x0r = a[offa + 2] + a[offa + 11]; + x0i = a[offa + 3] - a[offa + 10]; + y5r = wk1i * x0r - wk1r * x0i; + y5i = wk1i * x0i + wk1r * x0r; + x0r = a[offa + 6] - a[offa + 15]; + x0i = a[offa + 7] + a[offa + 14]; + y6r = wk1i * x0r - wk1r * x0i; + y6i = wk1i * x0i + wk1r * x0r; + x0r = a[offa + 6] + a[offa + 15]; + x0i = a[offa + 7] - a[offa + 14]; + y7r = wk1r * x0r - wk1i * x0i; + y7i = wk1r * x0i + wk1i * x0r; + x0r = y0r + y2r; + x0i = y0i + y2i; + x1r = y4r + y6r; + x1i = y4i + y6i; + a[offa] = x0r + x1r; + a[offa + 1] = x0i + x1i; + a[offa + 2] = x0r - x1r; + a[offa + 3] = x0i - x1i; + x0r = y0r - y2r; + x0i = y0i - y2i; + x1r = y4r - y6r; + x1i = y4i - y6i; + a[offa + 4] = x0r - x1i; + a[offa + 5] = x0i + x1r; + a[offa + 6] = x0r + x1i; + a[offa + 7] = x0i - x1r; + x0r = y1r - y3i; + x0i = y1i + y3r; + x1r = y5r - y7r; + x1i = y5i - y7i; + a[offa + 8] = x0r + x1r; + a[offa + 9] = x0i + x1i; + a[offa + 10] = x0r - x1r; + a[offa + 11] = x0i - x1i; + x0r = y1r + y3i; + x0i = y1i - y3r; + x1r = y5r + y7r; + x1i = y5i + y7i; + a[offa + 12] = x0r - x1i; + a[offa + 13] = x0i + x1r; + a[offa + 14] = x0r + x1i; + a[offa + 15] = x0i - x1r; + } + + private void cftf040(float[] a, int offa) { + float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i; + + x0r = a[offa] + a[offa + 4]; + x0i = a[offa + 1] + a[offa + 5]; + x1r = a[offa] - a[offa + 4]; + x1i = a[offa + 1] - a[offa + 5]; + x2r = a[offa + 2] + a[offa + 6]; + x2i = a[offa + 3] + a[offa + 7]; + x3r = a[offa + 2] - a[offa + 6]; + x3i = a[offa + 3] - a[offa + 7]; + a[offa] = x0r + x2r; + a[offa + 1] = x0i + x2i; + a[offa + 2] = x1r - x3i; + a[offa + 3] = x1i + x3r; + a[offa + 4] = x0r - x2r; + a[offa + 5] = x0i - x2i; + a[offa 
+ 6] = x1r + x3i; + a[offa + 7] = x1i - x3r; + } + + private void cftb040(float[] a, int offa) { + float x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i; + + x0r = a[offa] + a[offa + 4]; + x0i = a[offa + 1] + a[offa + 5]; + x1r = a[offa] - a[offa + 4]; + x1i = a[offa + 1] - a[offa + 5]; + x2r = a[offa + 2] + a[offa + 6]; + x2i = a[offa + 3] + a[offa + 7]; + x3r = a[offa + 2] - a[offa + 6]; + x3i = a[offa + 3] - a[offa + 7]; + a[offa] = x0r + x2r; + a[offa + 1] = x0i + x2i; + a[offa + 2] = x1r + x3i; + a[offa + 3] = x1i - x3r; + a[offa + 4] = x0r - x2r; + a[offa + 5] = x0i - x2i; + a[offa + 6] = x1r - x3i; + a[offa + 7] = x1i + x3r; + } + + private void cftx020(float[] a, int offa) { + float x0r, x0i; + x0r = a[offa] - a[offa + 2]; + x0i = -a[offa + 1] + a[offa + 3]; + a[offa] += a[offa + 2]; + a[offa + 1] += a[offa + 3]; + a[offa + 2] = x0r; + a[offa + 3] = x0i; + } + + private void cftxb020(float[] a, int offa) { + float x0r, x0i; + + x0r = a[offa] - a[offa + 2]; + x0i = a[offa + 1] - a[offa + 3]; + a[offa] += a[offa + 2]; + a[offa + 1] += a[offa + 3]; + a[offa + 2] = x0r; + a[offa + 3] = x0i; + } + + private void cftxc020(float[] a, int offa) { + float x0r, x0i; + x0r = a[offa] - a[offa + 2]; + x0i = a[offa + 1] + a[offa + 3]; + a[offa] += a[offa + 2]; + a[offa + 1] -= a[offa + 3]; + a[offa + 2] = x0r; + a[offa + 3] = x0i; + } + + private void rftfsub(int n, float[] a, int offa, int nc, float[] c, int startc) { + int k, kk, ks, m; + float wkr, wki, xr, xi, yr, yi; + int idx1, idx2; + + m = n >> 1; + ks = 2 * nc / m; + kk = 0; + for (int j = 2; j < m; j += 2) { + k = n - j; + kk += ks; + wkr = (float)(0.5 - c[startc + nc - kk]); + wki = c[startc + kk]; + idx1 = offa + j; + idx2 = offa + k; + xr = a[idx1] - a[idx2]; + xi = a[idx1 + 1] + a[idx2 + 1]; + yr = wkr * xr - wki * xi; + yi = wkr * xi + wki * xr; + a[idx1] -= yr; + a[idx1 + 1] = yi - a[idx1 + 1]; + a[idx2] += yr; + a[idx2 + 1] = yi - a[idx2 + 1]; + } + a[offa + m + 1] = -a[offa + m + 1]; + } + + private void rftbsub(int n, float[] a, int offa, int nc, float[] c, int startc) { + int k, kk, ks, m; + float wkr, wki, xr, xi, yr, yi; + int idx1, idx2; + + m = n >> 1; + ks = 2 * nc / m; + kk = 0; + for (int j = 2; j < m; j += 2) { + k = n - j; + kk += ks; + wkr = (float)(0.5 - c[startc + nc - kk]); + wki = c[startc + kk]; + idx1 = offa + j; + idx2 = offa + k; + xr = a[idx1] - a[idx2]; + xi = a[idx1 + 1] + a[idx2 + 1]; + yr = wkr * xr - wki * xi; + yi = wkr * xi + wki * xr; + a[idx1] -= yr; + a[idx1 + 1] -= yi; + a[idx2] += yr; + a[idx2 + 1] -= yi; + } + } + + private void scale(final float m, final float[] a, int offa, boolean complex) { + final float norm = (float)(1.0 / m); + int n2; + if (complex) { + n2 = 2 * n; + } else { + n2 = n; + } + int nthreads = ConcurrencyUtils.getNumberOfThreads(); + if ((nthreads > 1) && (n2 >= ConcurrencyUtils.getThreadsBeginN_1D_FFT_2Threads())) { + final int k = n2 / nthreads; + Future[] futures = new Future[nthreads]; + for (int i = 0; i < nthreads; i++) { + final int firstIdx = offa + i * k; + final int lastIdx = (i == (nthreads - 1)) ? 
offa + n2 : firstIdx + k; + futures[i] = ConcurrencyUtils.submit(new Runnable() { + + public void run() { + for (int i = firstIdx; i < lastIdx; i++) { + a[i] *= norm; + } + } + }); + } + ConcurrencyUtils.waitForCompletion(futures); + } else { + for (int i = offa; i < offa + n2; i++) { + a[i] *= norm; + } + + } + } +} diff --git a/app/src/main/java/be/tarsos/dsp/util/fft/GaussWindow.java b/app/src/main/java/be/tarsos/dsp/util/fft/GaussWindow.java new file mode 100644 index 0000000..efcde8a --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/util/fft/GaussWindow.java @@ -0,0 +1,77 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +/* + * Copyright (c) 2007 - 2008 by Damien Di Fede + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Library General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +package be.tarsos.dsp.util.fft; + +/** + * A Gauss window function. + * + * @author Damien Di Fede + * @author Corban Brook + * @see The + * Gauss Window + */ +public class GaussWindow extends WindowFunction { + double alpha; + + /** + * Constructs a Gauss window function. + * + * @param alpha the alpha parameter for the window construction. + */ + public GaussWindow(double alpha) { + if (alpha < 0.0 || alpha > 0.5) { + new IllegalArgumentException( + "Range for GaussWindow out of bounds. 
Value must be <= 0.5"); + } else { + this.alpha = alpha; + } + } + + /** Constructs a Gauss window with a default alpha value of 0.25 */ + public GaussWindow() { + this(0.25); + } + + protected float value(int length, int index) { + return (float) Math.pow(Math.E, -0.5 * Math.pow((index - (length - 1) / (double) 2) / (this.alpha * (length - 1) / (double) 2),(double) 2)); + } +} diff --git a/app/src/main/java/be/tarsos/dsp/util/fft/HammingWindow.java b/app/src/main/java/be/tarsos/dsp/util/fft/HammingWindow.java new file mode 100644 index 0000000..e77a70d --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/util/fft/HammingWindow.java @@ -0,0 +1,66 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +/* + * Copyright (c) 2007 - 2008 by Damien Di Fede + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Library General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +package be.tarsos.dsp.util.fft; + +/** + * A Hamming window function. + * + * @author Damien Di Fede + * @author Corban Brook + * @see The + * Hamming Window + * + */ +public class HammingWindow extends WindowFunction { + /** + * Constructs a Hamming window. 
+ */ + public HammingWindow() { + super(); + } + + protected float value(int length, int index) { + return 0.54f - 0.46f * (float) Math.cos(TWO_PI * index / (length - 1)); + } +} + diff --git a/app/src/main/java/be/tarsos/dsp/util/fft/HannWindow.java b/app/src/main/java/be/tarsos/dsp/util/fft/HannWindow.java new file mode 100644 index 0000000..a0e6797 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/util/fft/HannWindow.java @@ -0,0 +1,61 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +/* + * Copyright (c) 2007 - 2008 by Damien Di Fede + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Library General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +package be.tarsos.dsp.util.fft; + +/** + * A Hann window function. + * + * @author Damien Di Fede + * @author Corban Brook + * @see The + * Hann Window + */ +public class HannWindow extends WindowFunction { + /** Constructs a Hann window. */ + public HannWindow() { + } + + protected float value(int length, int index) { + //equal to 0.5 - 0.5 * Math.cos (TWO_PI * index / (length-1f)); + return 0.5f * (1f - (float) Math.cos(TWO_PI * index / (length - 1f))); + } +} diff --git a/app/src/main/java/be/tarsos/dsp/util/fft/LanczosWindow.java b/app/src/main/java/be/tarsos/dsp/util/fft/LanczosWindow.java new file mode 100644 index 0000000..e550817 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/util/fft/LanczosWindow.java @@ -0,0 +1,62 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. 
+* +*/ + +/* + * Copyright (c) 2007 - 2008 by Damien Di Fede + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Library General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +package be.tarsos.dsp.util.fft; + +/** + * A Lanczos window function. + * + * @author Damien Di Fede + * @author Corban Brook + * @see The + * Lanczos Window + */ +public class LanczosWindow extends WindowFunction { + /** Constructs a Lanczos window. */ + public LanczosWindow() { + } + + protected float value(int length, int index) { + float x = 2 * index / (float) (length - 1) - 1; + return (float) (Math.sin(Math.PI * x) / (Math.PI * x)); + } +} diff --git a/app/src/main/java/be/tarsos/dsp/util/fft/RectangularWindow.java b/app/src/main/java/be/tarsos/dsp/util/fft/RectangularWindow.java new file mode 100644 index 0000000..03f9395 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/util/fft/RectangularWindow.java @@ -0,0 +1,62 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +/* + * Copyright (c) 2007 - 2008 by Damien Di Fede + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Library General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +package be.tarsos.dsp.util.fft; + +/** + * A Rectangular window function A Rectangular window is equivalent to using no + * window at all. + * + * @author Damien Di Fede + * @author Corban Brook + * @see The + * Rectangular Window + */ +public class RectangularWindow extends WindowFunction { + /** Constructs a Rectangular window. 
*/ + public RectangularWindow() { + } + + protected float value(int length, int index) { + return 1f; + } +} diff --git a/app/src/main/java/be/tarsos/dsp/util/fft/ScaledHammingWindow.java b/app/src/main/java/be/tarsos/dsp/util/fft/ScaledHammingWindow.java new file mode 100644 index 0000000..3521b06 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/util/fft/ScaledHammingWindow.java @@ -0,0 +1,35 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +package be.tarsos.dsp.util.fft; + +public class ScaledHammingWindow extends WindowFunction { + + @Override + protected float value(int length, int index) { + double scale = 1.0 / (double)length / 0.54; + double factor = TWO_PI / (double)length; + return (float) (scale * (25.0/46.0 - 21.0/46.0 * Math.cos(factor * index))); + } + +} diff --git a/app/src/main/java/be/tarsos/dsp/util/fft/TriangularWindow.java b/app/src/main/java/be/tarsos/dsp/util/fft/TriangularWindow.java new file mode 100644 index 0000000..e5ec952 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/util/fft/TriangularWindow.java @@ -0,0 +1,62 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +/* + * Copyright (c) 2007 - 2008 by Damien Di Fede + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Library General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +package be.tarsos.dsp.util.fft; + +/** + * A Triangular window function. + * + * @author Damien Di Fede + * @author Corban Brook + * @see The + * Triangular Window + */ +public class TriangularWindow extends WindowFunction { + /** Constructs a Triangular window. 
*/ + public TriangularWindow() { + } + + protected float value(int length, int index) { + return 2f / length + * (length / 2f - Math.abs(index - (length - 1) / 2f)); + } +} diff --git a/app/src/main/java/be/tarsos/dsp/util/fft/WindowFunction.java b/app/src/main/java/be/tarsos/dsp/util/fft/WindowFunction.java new file mode 100644 index 0000000..d0060f5 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/util/fft/WindowFunction.java @@ -0,0 +1,128 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +/* + * Copyright (c) 2007 - 2008 by Damien Di Fede + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU Library General Public License as published + * by the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +package be.tarsos.dsp.util.fft; + +/** + * A Window function represents a curve which is applied to a sample buffer to + * reduce the introduction of spectral leakage in the Fourier transform. + * + *
+ * <b>Windowing</b>
+ * <p>
+ * Windowing is the process of shaping the audio samples before transforming
+ * them to the frequency domain. The Fourier Transform assumes the sample buffer
+ * is a repetitive signal; if a sample buffer is not truly periodic within
+ * the measured interval, sharp discontinuities may arise that can introduce
+ * spectral leakage. Spectral leakage is the spreading of signal energy across
+ * multiple FFT bins. This "spreading" can drown out narrow band signals and
+ * hinder detection.
+ * </p>
+ * <p>
+ * A windowing
+ * function attempts to reduce spectral leakage by attenuating the measured
+ * sample buffer at its end points to eliminate discontinuities. If you call the
+ * window() function with an appropriate WindowFunction, such as
+ * HammingWindow(), the sample buffers passed to the object for
+ * analysis will be shaped by the current window before being transformed. The
+ * result of using a window is to reduce the leakage in the spectrum somewhat.
+ * </p>
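+ * <p>
+ * (Usage sketch, added for illustration and not part of the original
+ * documentation: windowing one analysis frame before an FFT is a single call
+ * on one of the concrete subclasses added in this patch, for example:)
+ * <pre>
+ * float[] frame = new float[1024];   // one analysis frame of audio samples
+ * // ... fill the frame with audio ...
+ * new HammingWindow().apply(frame);  // taper the frame in place before the FFT
+ * </pre>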
+ * WindowFunction handles work associated with various window + * functions such as the Hamming window. To create your own window function you + * must extend WindowFunction and implement the + * {@link #value(int, int) value} method which defines the shape of the window + * at a given offset. WindowFunction will call this method to apply + * the window to a sample buffer. The number passed to the method is an offset + * within the length of the window curve. + * + * @author Damien Di Fede + * @author Corban Brook + * + */ +public abstract class WindowFunction { + + /** The float value of 2*PI. Provided as a convenience for subclasses. */ + protected static final float TWO_PI = (float) (2 * Math.PI); + protected int length; + + + /** + * Construct a new window. + */ + protected WindowFunction() { + } + + /** + * Apply the window function to a sample buffer. + * + * @param samples + * a sample buffer + */ + public void apply(float[] samples) { + this.length = samples.length; + + for (int n = 0; n < samples.length; n++) { + samples[n] *= value(samples.length, n); + } + } + + /** + * Generates the curve of the window function. + * + * @param length + * the length of the window + * @return the shape of the window function + */ + public float[] generateCurve(int length) { + float[] samples = new float[length]; + for (int n = 0; n < length; n++) { + samples[n] = value(length, n); + } + return samples; + } + + /** + * The value of the window function + * @param length with the lengt of the window (in samples) + * @param index at index + * @return The value of the window function at the requested index. + */ + protected abstract float value(int length, int index); +} diff --git a/app/src/main/java/be/tarsos/dsp/util/fft/package-info.java b/app/src/main/java/be/tarsos/dsp/util/fft/package-info.java new file mode 100644 index 0000000..d8e4ab9 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/util/fft/package-info.java @@ -0,0 +1,28 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +/** + * Utilities needed for FFT. 
+ */ +package be.tarsos.dsp.util.fft; diff --git a/app/src/main/java/be/tarsos/dsp/util/package-info.java b/app/src/main/java/be/tarsos/dsp/util/package-info.java new file mode 100644 index 0000000..adf2e8d --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/util/package-info.java @@ -0,0 +1,28 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +/** + * Utility classes to handle sampled sound. + */ +package be.tarsos.dsp.util; diff --git a/app/src/main/java/be/tarsos/dsp/wavelet/HaarWaveletCoder.java b/app/src/main/java/be/tarsos/dsp/wavelet/HaarWaveletCoder.java new file mode 100644 index 0000000..2492827 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/wavelet/HaarWaveletCoder.java @@ -0,0 +1,83 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. 
+* +*/ + +package be.tarsos.dsp.wavelet; + +import java.util.Arrays; + +import be.tarsos.dsp.AudioEvent; +import be.tarsos.dsp.AudioProcessor; + +public class HaarWaveletCoder implements AudioProcessor{ + + private final HaarWaveletTransform transform; + + private int compression; + + public HaarWaveletCoder(){ + this(16); + } + + public HaarWaveletCoder(int compression){ + transform = new HaarWaveletTransform(); + this.compression = compression; + } + + + @Override + public boolean process(AudioEvent audioEvent) { + + float[] audioBuffer = audioEvent.getFloatBuffer(); + float[] sortBuffer = new float[audioBuffer.length]; + transform.transform(audioEvent.getFloatBuffer()); + + for (int i = 0; i < sortBuffer.length; i++) { + sortBuffer[i] = Math.abs(audioBuffer[i]); + } + Arrays.sort(sortBuffer); + + double threshold = sortBuffer[compression]; + + for (int i = 0; i < audioBuffer.length; i++) { + if (Math.abs(audioBuffer[i]) <= threshold) { + audioBuffer[i] = 0; + } + } + + return true; + } + + @Override + public void processingFinished() { + + } + + public void setCompression(int compression){ + this.compression = compression; + } + + public int getCompression(){ + return this.compression; + } + +} diff --git a/app/src/main/java/be/tarsos/dsp/wavelet/HaarWaveletDecoder.java b/app/src/main/java/be/tarsos/dsp/wavelet/HaarWaveletDecoder.java new file mode 100644 index 0000000..789c3ad --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/wavelet/HaarWaveletDecoder.java @@ -0,0 +1,49 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. 
+* +*/ + +package be.tarsos.dsp.wavelet; + +import be.tarsos.dsp.AudioEvent; +import be.tarsos.dsp.AudioProcessor; + +public class HaarWaveletDecoder implements AudioProcessor{ + + private final HaarWaveletTransform transform; + + public HaarWaveletDecoder(){ + transform = new HaarWaveletTransform(); + } + + @Override + public boolean process(AudioEvent audioEvent) { + float[] audioBuffer = audioEvent.getFloatBuffer(); + transform.inverseTransform(audioBuffer); + return true; + } + + @Override + public void processingFinished() { + + } + +} diff --git a/app/src/main/java/be/tarsos/dsp/wavelet/HaarWaveletFileReader.java b/app/src/main/java/be/tarsos/dsp/wavelet/HaarWaveletFileReader.java new file mode 100644 index 0000000..e1766e9 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/wavelet/HaarWaveletFileReader.java @@ -0,0 +1,99 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +package be.tarsos.dsp.wavelet; + +import java.io.FileInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; + +import be.tarsos.dsp.AudioEvent; +import be.tarsos.dsp.AudioProcessor; + +public class HaarWaveletFileReader implements AudioProcessor { + + private final int compression; + private FileInputStream rawInputStream; + + public HaarWaveletFileReader(String fileName, int compression){ + this.compression = compression; + try { + this.rawInputStream = new FileInputStream(fileName); + } catch (FileNotFoundException e) { + this.rawInputStream = null; + } + } + + @Override + public boolean process(AudioEvent audioEvent) { + + float[] audioBuffer = new float[32]; + + byte[] byteBuffer = new byte[(32-compression)*2]; + int placesWithZero = 0; + try { + rawInputStream.read(byteBuffer); + placesWithZero += rawInputStream.read(); + placesWithZero += (rawInputStream.read()<<8); + placesWithZero += (rawInputStream.read()<<16); + placesWithZero += (rawInputStream.read()<<24); + } catch (IOException e) { + e.printStackTrace(); + } + + int byteBufferIndex = 0; + for(int i = 0 ; i < audioBuffer.length ; i++){ + if((placesWithZero & (1< 0; + } catch (IOException e) { + + e.printStackTrace(); + } + + return more; + } + + @Override + public void processingFinished() { + try { + rawInputStream.close(); + } catch (IOException e) { + e.printStackTrace(); + } + } + +} diff --git a/app/src/main/java/be/tarsos/dsp/wavelet/HaarWaveletFileWriter.java b/app/src/main/java/be/tarsos/dsp/wavelet/HaarWaveletFileWriter.java new file mode 100644 index 0000000..b3d18f6 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/wavelet/HaarWaveletFileWriter.java @@ -0,0 +1,107 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* 
------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +package be.tarsos.dsp.wavelet; + +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; + +import be.tarsos.dsp.AudioEvent; +import be.tarsos.dsp.AudioProcessor; + +public class HaarWaveletFileWriter implements AudioProcessor { + + + private final int compression; + private FileOutputStream rawOutputStream; + + public HaarWaveletFileWriter(String fileName, int compression){ + this.compression = compression; + try { + this.rawOutputStream = new FileOutputStream(fileName); + } catch (FileNotFoundException e) { + this.rawOutputStream = null; + } + } + + @Override + public boolean process(AudioEvent audioEvent) { + float[] audioBuffer = audioEvent.getFloatBuffer(); + + int placesWithZero = 0; + int zeroCounter = 0; + for(int i = 0 ; i < audioBuffer.length ; i++){ + if(audioBuffer[i]==0 && zeroCounter < compression){ + zeroCounter++; + placesWithZero = placesWithZero | (1<>> 8); + } + } + + try { + rawOutputStream.write(byteBuffer); + rawOutputStream.write((byte) placesWithZero); + rawOutputStream.write((byte) (placesWithZero>>>8)); + rawOutputStream.write((byte) (placesWithZero>>>16)); + rawOutputStream.write((byte) (placesWithZero>>>24)); + + } catch (IOException e) { + e.printStackTrace(); + } + + + return true; + } + + @Override + public void processingFinished() { + try { + rawOutputStream.close(); + } catch (IOException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + + } + +} diff --git a/app/src/main/java/be/tarsos/dsp/wavelet/HaarWaveletTransform.java b/app/src/main/java/be/tarsos/dsp/wavelet/HaarWaveletTransform.java new file mode 100644 index 0000000..b71d034 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/wavelet/HaarWaveletTransform.java @@ -0,0 +1,140 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + +package be.tarsos.dsp.wavelet; + +public class HaarWaveletTransform { + + private final boolean preserveEnergy; + private final float sqrtTwo = (float) Math.sqrt(2.0); + + public HaarWaveletTransform(boolean preserveEnergy){ + this.preserveEnergy = preserveEnergy; + } + + public HaarWaveletTransform(){ + this(false); + } + + /** + * Does an in-place HaarWavelet wavelet transform. The + * length of data needs to be a power of two. + * It is based on the algorithm found in "Wavelets Made Easy" by Yves Nivergelt, page 24. + * @param s The data to transform. 
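+ *          (Worked example, added for illustration and not part of the
+ *          original documentation: with preserveEnergy == false,
+ *          transform(new float[]{1, 2, 3, 4}) rewrites the array in place to
+ *          {2.5f, -0.5f, -1.0f, -0.5f}; slot 0 holds the overall average and
+ *          the remaining slots hold the difference (detail) coefficients.)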
+ */ + public void transform(float[] s){ + int m = s.length; + assert isPowerOfTwo(m); + int n = log2(m); + int j = 2; + int i = 1; + for(int l = 0 ; l < n ; l++ ){ + m = m/2; + for(int k=0; k < m;k++){ + float a = (s[j*k]+s[j*k + i])/2.0f; + float c = (s[j*k]-s[j*k + i])/2.0f; + if(preserveEnergy){ + a = a/sqrtTwo; + c = c/sqrtTwo; + } + s[j*k] = a; + s[j*k+i] = c; + } + i = j; + j = j * 2; + } + } + + /** + * Does an in-place inverse HaarWavelet Wavelet Transform. The data needs to be a power of two. + * It is based on the algorithm found in "Wavelets Made Easy" by Yves Nivergelt, page 29. + * @param data The data to transform. + */ + public void inverseTransform(float[] data){ + int m = data.length; + assert isPowerOfTwo(m); + int n = log2(m); + int i = pow2(n-1); + int j = 2 * i; + m = 1; + for(int l = n ; l >= 1; l--){ + for(int k = 0; k < m ; k++){ + float a = data[j*k]+data[j*k+i]; + float a1 = data[j*k]-data[j*k+i]; + if(preserveEnergy){ + a = a*sqrtTwo; + a1 = a1*sqrtTwo; + } + data[j*k] = a; + data[j*k+i] = a1; + } + j = i; + i = i /2; + m = 2*m; + } + } + + + + /** + * Checks if the number is a power of two. For performance it uses bit shift + * operators. e.g. 4 in binary format is + * "0000 0000 0000 0000 0000 0000 0000 0100"; and -4 is + * "1111 1111 1111 1111 1111 1111 1111 1100"; and 4 & -4 will be + * "0000 0000 0000 0000 0000 0000 0000 0100"; + * + * @param number + * The number to check. + * @return True if the number is a power of two, false otherwise. + */ + public static boolean isPowerOfTwo(int number) { + if (number <= 0) { + throw new IllegalArgumentException("number: " + number); + } + return (number & -number) == number; + } + + /** + * A quick and simple way to calculate log2 of integers. + * + * @param bits + * the integer + * @return log2(bits) + */ + public static int log2(int bits) { + if (bits == 0) { + return 0; + } + return 31 - Integer.numberOfLeadingZeros(bits); + } + + /** + * A quick way to calculate the power of two (2^power), by using bit shifts. + * @param power The power. 
+ * @return 2^power + */ + public static int pow2(int power) { + return 1<> 1; + + for (int n = 0; n < half; n++) { + if (direction == forward) { + S[n] = ((sqrt3 - 1.0f) / sqrt2) * S[n]; + S[n + half] = ((sqrt3 + 1.0f) / sqrt2) * S[n + half]; + } else if (direction == inverse) { + S[n] = ((sqrt3 + 1.0f) / sqrt2) * S[n]; + S[n + half] = ((sqrt3 - 1.0f) / sqrt2) * S[n + half]; + } else { + System.out + .println("Daubechies4Wavelet::normalize: bad direction value"); + break; + } + } + } // normalize + + protected void predict(float[] S, int N, int direction) { + int half = N >> 1; + + if (direction == forward) { + S[half] = S[half] - (sqrt3 / 4.0f) * S[0] + - (((sqrt3 - 2) / 4.0f) * S[half - 1]); + } else if (direction == inverse) { + S[half] = S[half] + (sqrt3 / 4.0f) * S[0] + + (((sqrt3 - 2) / 4.0f) * S[half - 1]); + } else { + System.out + .println("Daubechies4Wavelet::predict: bad direction value"); + } + + // predict, forward + + for (int n = 1; n < half; n++) { + if (direction == forward) { + S[half + n] = S[half + n] - (sqrt3 / 4.0f) * S[n] + - (((sqrt3 - 2) / 4.0f) * S[n - 1]); + } else if (direction == inverse) { + S[half + n] = S[half + n] + (sqrt3 / 4.0f) * S[n] + + (((sqrt3 - 2) / 4.0f) * S[n - 1]); + } else { + break; + } + } + + } // predict + + protected void updateOne(float[] S, int N, int direction) { + int half = N >> 1; + + for (int n = 0; n < half; n++) { + float updateVal = sqrt3 * S[half + n]; + + if (direction == forward) { + S[n] = S[n] + updateVal; + } else if (direction == inverse) { + S[n] = S[n] - updateVal; + } else { + System.out + .println("Daubechies4Wavelet::updateOne: bad direction value"); + break; + } + } + } // updateOne + + protected void update(float[] S, int N, int direction) { + int half = N >> 1; + + for (int n = 0; n < half - 1; n++) { + if (direction == forward) { + S[n] = S[n] - S[half + n + 1]; + } else if (direction == inverse) { + S[n] = S[n] + S[half + n + 1]; + } else { + System.out + .println("Daubechies4Wavelet::update: bad direction value"); + break; + } + } + + if (direction == forward) { + S[half - 1] = S[half - 1] - S[half]; + } else if (direction == inverse) { + S[half - 1] = S[half - 1] + S[half]; + } + } // update + + public void forwardTrans(float[] vec) { + final int N = vec.length; + + for (int n = N; n > 1; n = n >> 1) { + split(vec, n); + updateOne(vec, n, forward); // update 1 + predict(vec, n, forward); + update(vec, n, forward); // update 2 + normalize(vec, n, forward); + } + } // forwardTrans + + /** + *
+ * <p>
+ * Default two step Lifting Scheme inverse wavelet transform
+ * </p>
+ * <p>
+ * inverseTrans is passed the result of an ordered wavelet transform,
+ * consisting of an average and a set of wavelet coefficients. The inverse
+ * transform is calculated in-place and the result is returned in the
+ * argument array.
+ * </p>
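+ * <p>
+ * (Usage sketch, added for illustration and not part of the original
+ * documentation: for a buffer whose length is a power of two,
+ * forwardTrans(vec) followed by inverseTrans(vec) reproduces the original
+ * samples up to floating point rounding, since every lifting step is
+ * reversed exactly.)
+ * </p>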
+ */ + public void inverseTrans(float[] vec) { + final int N = vec.length; + + for (int n = 2; n <= N; n = n << 1) { + normalize(vec, n, inverse); + update(vec, n, inverse); + predict(vec, n, inverse); + updateOne(vec, n, inverse); + merge(vec, n); + } + } // inverseTrans +} diff --git a/app/src/main/java/be/tarsos/dsp/wavelet/lift/Daubechies4WaveletCoder.java b/app/src/main/java/be/tarsos/dsp/wavelet/lift/Daubechies4WaveletCoder.java new file mode 100644 index 0000000..7e6269f --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/wavelet/lift/Daubechies4WaveletCoder.java @@ -0,0 +1,82 @@ +/* + * _______ _____ _____ _____ + * |__ __| | __ \ / ____| __ \ + * | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | + * | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ + * | | (_| | | \__ \ (_) \__ \ |__| |____) | | + * |_|\__,_|_| |___/\___/|___/_____/|_____/|_| + * + * ------------------------------------------------------------- + * + * TarsosDSP is developed by Joren Six at IPEM, University Ghent + * + * ------------------------------------------------------------- + * + * Info: http://0110.be/tag/TarsosDSP + * Github: https://github.com/JorenSix/TarsosDSP + * Releases: http://0110.be/releases/TarsosDSP/ + * + * TarsosDSP includes modified source code by various authors, + * for credits and info, see README. + * + */ + +package be.tarsos.dsp.wavelet.lift; + +import java.util.Arrays; + +import be.tarsos.dsp.AudioEvent; +import be.tarsos.dsp.AudioProcessor; + +public class Daubechies4WaveletCoder implements AudioProcessor { + + private final Daubechies4Wavelet transform; + + private int compression; + + public Daubechies4WaveletCoder() { + this(16); + } + + public Daubechies4WaveletCoder(int compression) { + transform = new Daubechies4Wavelet(); + this.compression = compression; + } + + @Override + public boolean process(AudioEvent audioEvent) { + + float[] audioBuffer = audioEvent.getFloatBuffer(); + float[] sortBuffer = new float[audioBuffer.length]; + + transform.forwardTrans(audioBuffer); + + for (int i = 0; i < sortBuffer.length; i++) { + sortBuffer[i] = Math.abs(audioBuffer[i]); + } + Arrays.sort(sortBuffer); + + double threshold = sortBuffer[compression]; + + for (int i = 0; i < audioBuffer.length; i++) { + if (Math.abs(audioBuffer[i]) <= threshold) { + audioBuffer[i] = 0; + } + } + return true; + } + + @Override + public void processingFinished() { + + } + + public void setCompression(int compression) { + this.compression = compression; + } + + public int getCompression() { + return this.compression; + } + +} diff --git a/app/src/main/java/be/tarsos/dsp/wavelet/lift/Daubechies4WaveletDecoder.java b/app/src/main/java/be/tarsos/dsp/wavelet/lift/Daubechies4WaveletDecoder.java new file mode 100644 index 0000000..a278fa9 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/wavelet/lift/Daubechies4WaveletDecoder.java @@ -0,0 +1,49 @@ +/* + * _______ _____ _____ _____ + * |__ __| | __ \ / ____| __ \ + * | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | + * | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ + * | | (_| | | \__ \ (_) \__ \ |__| |____) | | + * |_|\__,_|_| |___/\___/|___/_____/|_____/|_| + * + * ------------------------------------------------------------- + * + * TarsosDSP is developed by Joren Six at IPEM, University Ghent + * + * ------------------------------------------------------------- + * + * Info: http://0110.be/tag/TarsosDSP + * Github: https://github.com/JorenSix/TarsosDSP + * Releases: http://0110.be/releases/TarsosDSP/ + * + * TarsosDSP includes modified source code by various authors, 
+ * for credits and info, see README. + * + */ + +package be.tarsos.dsp.wavelet.lift; + +import be.tarsos.dsp.AudioEvent; +import be.tarsos.dsp.AudioProcessor; + +public class Daubechies4WaveletDecoder implements AudioProcessor { + + private final Daubechies4Wavelet transform; + + public Daubechies4WaveletDecoder() { + transform = new Daubechies4Wavelet(); + } + + @Override + public boolean process(AudioEvent audioEvent) { + float[] audioBuffer = audioEvent.getFloatBuffer(); + transform.inverseTrans(audioBuffer); + return true; + } + + @Override + public void processingFinished() { + + } + +} diff --git a/app/src/main/java/be/tarsos/dsp/wavelet/lift/HaarWavelet.java b/app/src/main/java/be/tarsos/dsp/wavelet/lift/HaarWavelet.java new file mode 100644 index 0000000..40203c5 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/wavelet/lift/HaarWavelet.java @@ -0,0 +1,139 @@ +package be.tarsos.dsp.wavelet.lift; + +/** + *
+ * <p>
+ * HaarWavelet (flat LineWavelet) wavelet.
+ * </p>
+ *
+ * <p>
+ * As with all Lifting scheme wavelet transform functions, the first stage of a
+ * transform step is the split stage. The split step moves the even elements to
+ * the first half of an N element region and the odd elements to the second half
+ * of the N element region.
+ * </p>
+ *
+ * <p>
+ * The Lifting Scheme version of the HaarWavelet transform uses a wavelet
+ * function (predict stage) that "predicts" that an odd element will have the
+ * same value as its preceding even element. Stated another way, the odd element
+ * is "predicted" to be on a flat (zero slope LineWavelet) shared with the even
+ * point. The difference between this "prediction" and the actual odd value
+ * replaces the odd element.
+ * </p>
+ *
+ * <p>
+ * The wavelet scaling function (a.k.a. smoothing function) used in the update
+ * stage calculates the average between an even and an odd element.
+ * </p>
+ *
+ * <p>
+ * The merge stage at the end of the inverse transform interleaves odd and even
+ * elements from the two halves of the array (e.g., ordering them
+ * even0, odd0, even1, odd1, ...)
+ * </p>
+ *
+ * <b>Copyright and Use</b>
+ *
+ * <p>
+ * You may use this source code without limitation and without fee as long as
+ * you include:
+ * </p>
+ * <blockquote>This software was written and is copyrighted by Ian Kaplan, Bear
+ * Products International, www.bearcave.com, 2001.</blockquote>
+ * <p>
+ * This software is provided "as is", without any warranty or claim as to its
+ * usefulness. Anyone who uses this source code uses it at their own risk. Nor
+ * is any support provided by Ian Kaplan and Bear Products International.
+ * </p>
+ * <p>
+ * Please send any bug fixes or suggested source changes to:
+ * </p>
+ * <pre>
+ *      iank@bearcave.com
+ * </pre>
+ * + * @author Ian Kaplan + */ +public class HaarWavelet extends LiftingSchemeBaseWavelet { + + /** + * HaarWavelet predict step + */ + protected void predict(float[] vec, int N, int direction) { + int half = N >> 1; + + for (int i = 0; i < half; i++) { + float predictVal = vec[i]; + int j = i + half; + + if (direction == forward) { + vec[j] = vec[j] - predictVal; + } else if (direction == inverse) { + vec[j] = vec[j] + predictVal; + } else { + System.out.println("HaarWavelet::predict: bad direction value"); + } + } + } + + /** + * Transform forward + * @param vec the vector to update. + */ + public void forwardTransOne(float[] vec) { + final int N = vec.length; + + split(vec, N); + predict(vec, N, forward); + update(vec, N, forward); + + } // forwardTrans + + /** + *

+ * Update step of the HaarWavelet wavelet transform. + *

+ *

+ * The wavelet transform calculates a set of detail or difference + * coefficients in the predict step. These are stored in the upper half of + * the array. The update step calculates an average from the even-odd + * element pairs. The averages will replace the even elements in the lower + * half of the array. + *

+ *

+ * The HaarWavelet wavelet calculation used in the Lifting Scheme is + *

+ * + *
+ *        d(j+1, i) = odd(j+1, i) = odd(j, i) - even(j, i)
+ *        a(j+1, i) = even(j, i)  = (even(j, i) + odd(j, i)) / 2
+	 * 
+ *

+ * Note that the Lifting Scheme uses an in-place algorithm. The odd elements + * have been replaced by the detail coefficients in the predict step. With a + * little algebra we can substitute the coefficient calculation into the + * average calculation, which gives us + *

+ * + *
+ *        a(j+1, i) = even(j, i) = even(j, i) + (odd(j, i) / 2)
+	 * 
+ */ + protected void update(float[] vec, int N, int direction) { + int half = N >> 1; + + for (int i = 0; i < half; i++) { + int j = i + half; + float updateVal = vec[j] / 2.0f; + + if (direction == forward) { + vec[i] = vec[i] + updateVal; + } else if (direction == inverse) { + vec[i] = vec[i] - updateVal; + } else { + System.out.println("update: bad direction value"); + } + } + } + +} // HaarWavelet diff --git a/app/src/main/java/be/tarsos/dsp/wavelet/lift/HaarWithPolynomialInterpolationWavelet.java b/app/src/main/java/be/tarsos/dsp/wavelet/lift/HaarWithPolynomialInterpolationWavelet.java new file mode 100644 index 0000000..5e89a77 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/wavelet/lift/HaarWithPolynomialInterpolationWavelet.java @@ -0,0 +1,229 @@ +package be.tarsos.dsp.wavelet.lift; + +/** + *

+ * HaarWavelet transform extended with a polynomial interpolation step + *

+ *

+ * This wavelet transform extends the HaarWavelet transform with a polynomial + * wavelet function. + *

+ *

+ * The polynomial wavelet uses 4-point polynomial interpolation to "predict" an + * odd point from four even point values. + *

+ *

+ * This class extends the HaarWavelet transform with an interpolation stage + * which follows the predict and update stages of the HaarWavelet transform. The + * predict value is calculated from the even points, which in this case are the + * smoothed values calculated by the scaling function (e.g., the averages of the + * even and odd values). + *

+ * + *

+ * The predict value is subtracted from the current odd value, which is the + * result of the HaarWavelet wavelet function (e.g., the difference between the + * odd value and the even value). This tends to result in large odd values after + * the interpolation stage, which is a weakness in this algorithm. + *
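A usage sketch, assuming the classes added in this patch compile as-is: because the inverse transform applies the extra interpolation stage first and in reverse, a forward/inverse round trip still reconstructs the input (up to float rounding). The class name HaarPolyRoundTrip is for illustration only.

import java.util.Arrays;
import be.tarsos.dsp.wavelet.lift.HaarWithPolynomialInterpolationWavelet;

public final class HaarPolyRoundTrip {
    public static void main(String[] args) {
        float[] data = {25f, 40f, 8f, 24f, 48f, 48f, 40f, 16f};   // length must be a power of two
        HaarWithPolynomialInterpolationWavelet w = new HaarWithPolynomialInterpolationWavelet();
        w.forwardTrans(data);   // split, predict, update, interp (in place)
        w.inverseTrans(data);   // interp, update, predict, merge, undoing each stage
        System.out.println(Arrays.toString(data));   // the original samples again
    }
}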

+ * + *

+ * This algorithm was suggested by Wim Sweldens' tutorial Building Your Own + * Wavelets at Home. + *

+ * + * + * + *
+ *   
+ *   http://www.bearcave.com/misl/misl_tech/wavelets/lifting/index.html
+ * 
+ * + * + * Copyright and Use + * + *

+ * You may use this source code without limitation and without fee as long as + * you include: + *

+ *
This software was written and is copyrighted by Ian Kaplan, Bear + * Products International, www.bearcave.com, 2001.
+ *

+ * This software is provided "as is", without any warrenty or claim as to its + * usefulness. Anyone who uses this source code uses it at their own risk. Nor + * is any support provided by Ian Kaplan and Bear Products International. + *

+ * Please send any bug fixes or suggested source changes to: + * + *

+ *      iank@bearcave.com
+ * 
+ * + * @author Ian Kaplan + */ +public class HaarWithPolynomialInterpolationWavelet extends HaarWavelet { + final static int numPts = 4; + private final PolynomialInterpolation fourPt; + + /** + * HaarWithPolynomialInterpolationWavelet class constructor + */ + public HaarWithPolynomialInterpolationWavelet() { + fourPt = new PolynomialInterpolation(); + } + + /** + *

+ * Copy four points or N (whichever is less) data points from
+ * vec into d. These points are the "known" points used in the
+ * polynomial interpolation.
+ *

+ * + * @param vec + * the input data set on which the wavelet is calculated + * @param d + * an array into which N data points, starting at + * start are copied. + * @param N + * the number of polynomial interpolation points + * @param start + * the index in vec from which copying starts + */ + private void fill(float[] vec, float[] d, int N, int start) { + int n = numPts; + if (n > N) + n = N; + int end = start + n; + int j = 0; + + for (int i = start; i < end; i++) { + d[j] = vec[i]; + j++; + } + } // fill + + /** + *

+ * Predict an odd point from the even points, using 4-point polynomial + * interpolation. + *

+ *

+ * The four points used in the polynomial interpolation are the even points. + * We pretend that these four points are located at the x-coordinates + * 0,1,2,3. The first odd point interpolated will be located between the + * first and second even point, at 0.5. The next N-3 points are located at + * 1.5 (in the middle of the four points). The last two points are located + * at 2.5 and 3.5. For complete documentation see + *

+ * + *
+	 *   
+	 *   http://www.bearcave.com/misl/misl_tech/wavelets/lifting/index.html
+	 * 
+ * + *

+ * The difference between the predicted (interpolated) value and the actual + * odd value replaces the odd value in the forward transform. + *

+ * + *

+ * As the recursive steps proceed, N will eventually be 4 and then 2. When N + * = 4, linear interpolation is used. When N = 2, HaarWavelet interpolation + * is used (the prediction for the odd value is that it is equal to the even + * value). + *

+ * + * @param vec + * the input data on which the forward or inverse transform is + * calculated. + * @param N + * the area of vec over which the transform is calculated + * @param direction + * forward or inverse transform + */ + protected void interp(float[] vec, int N, int direction) { + int half = N >> 1; + float[] d = new float[numPts]; + + // int k = 42; + + for (int i = 0; i < half; i++) { + float predictVal; + + if (i == 0) { + if (half == 1) { + // e.g., N == 2, and we use HaarWavelet interpolation + predictVal = vec[0]; + } else { + fill(vec, d, N, 0); + predictVal = fourPt.interpPoint(0.5f, half, d); + } + } else if (i == 1) { + predictVal = fourPt.interpPoint(1.5f, half, d); + } else if (i == half - 2) { + predictVal = fourPt.interpPoint(2.5f, half, d); + } else if (i == half - 1) { + predictVal = fourPt.interpPoint(3.5f, half, d); + } else { + fill(vec, d, N, i - 1); + predictVal = fourPt.interpPoint(1.5f, half, d); + } + + int j = i + half; + if (direction == forward) { + vec[j] = vec[j] - predictVal; + } else if (direction == inverse) { + vec[j] = vec[j] + predictVal; + } else { + System.out + .println("PolynomialWavelets::predict: bad direction value"); + } + } + } // interp + + /** + *

+ * HaarWavelet transform extended with polynomial interpolation forward
+ * transform.
+ *

+ *

+ * This version of the forwardTrans function overrides the function in the + * LiftingSchemeBaseWavelet base class. This function introduces an extra + * polynomial interpolation stage at the end of the transform. + *

+ */ + public void forwardTrans(float[] vec) { + final int N = vec.length; + + for (int n = N; n > 1; n = n >> 1) { + split(vec, n); + predict(vec, n, forward); + update(vec, n, forward); + interp(vec, n, forward); + } // for + } // forwardTrans + + /** + *

+ * HaarWavelet transform extended with polynomial interpolation inverse
+ * transform.
+ *

+ *

+ * This version of the inverseTrans function overrides the function in the + * LiftingSchemeBaseWavelet base class. This function introduces an inverse + * polynomial interpolation stage at the start of the inverse transform. + *

+ */ + public void inverseTrans(float[] vec) { + final int N = vec.length; + + for (int n = 2; n <= N; n = n << 1) { + interp(vec, n, inverse); + update(vec, n, inverse); + predict(vec, n, inverse); + merge(vec, n); + } + } // inverseTrans + +} // HaarWithPolynomialInterpolationWavelet + diff --git a/app/src/main/java/be/tarsos/dsp/wavelet/lift/LiftingSchemeBaseWavelet.java b/app/src/main/java/be/tarsos/dsp/wavelet/lift/LiftingSchemeBaseWavelet.java new file mode 100644 index 0000000..b2c09f6 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/wavelet/lift/LiftingSchemeBaseWavelet.java @@ -0,0 +1,223 @@ +package be.tarsos.dsp.wavelet.lift; + +/** + *

+ * class LiftingSchemeBaseWavelet: base class for simple Lifting Scheme wavelets + * using split, predict, update or update, predict, merge steps. + *

+ * + *

+ * Simple lifting scheme wavelets consist of three steps, a split/merge step, + * predict step and an update step: + *

+ *
+ *   - The split step divides the elements in an array so that the even elements
+ *     are in the first half and the odd elements are in the second half.
+ *   - The merge step is the inverse of the split step. It takes two regions of
+ *     an array, an odd region and an even region, and merges them into a new
+ *     region where an even element alternates with an odd element.
+ *   - The predict step calculates the difference between an odd element and its
+ *     predicted value based on the even elements. The difference between the
+ *     predicted value and the actual value replaces the odd element.
+ *   - The update step operates on the even elements (the predict step operates
+ *     on the odd elements), replacing each even element with an average. The
+ *     result of the update step becomes the input to the next recursive step in
+ *     the wavelet calculation.
+ * + *

+ * The split and merge methods are shared by all Lifting Scheme wavelet + * algorithms. This base class provides the transform and inverse transform + * methods (forwardTrans and inverseTrans). The predict and update methods are + * abstract and are defined for a particular Lifting Scheme wavelet sub-class. + *
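The effect of the split step is easiest to see on concrete data; a standalone sketch follows (the class itself achieves the same reordering in place with a swapping loop, and merge is simply the inverse interleaving):

import java.util.Arrays;

public final class SplitDemo {
    static float[] split(float[] v) {
        int half = v.length / 2;
        float[] out = new float[v.length];
        for (int i = 0; i < half; i++) {
            out[i] = v[2 * i];             // even-indexed elements go to the first half
            out[half + i] = v[2 * i + 1];  // odd-indexed elements go to the second half
        }
        return out;
    }

    public static void main(String[] args) {
        float[] v = {10f, 11f, 12f, 13f, 14f, 15f, 16f, 17f};
        System.out.println(Arrays.toString(split(v)));
        // [10.0, 12.0, 14.0, 16.0, 11.0, 13.0, 15.0, 17.0]
    }
}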

+ * + *

+ * References: + *

+ * + *
+ *   - The Wavelet Lifting Scheme by Ian Kaplan, www.bearcave.com. This is the
+ *     parent web page for this Java source code.
+ *   - Ripples in Mathematics: the Discrete Wavelet Transform by Arne Jensen and
+ *     Anders la Cour-Harbo, Springer, 2001.
+ *   - Building Your Own Wavelets at Home, in Wavelets in Computer Graphics.
+ * + * + * Copyright and Use + * + *

+ * You may use this source code without limitation and without fee as long as + * you include: + *

+ *
This software was written and is copyrighted by Ian Kaplan, Bear + * Products International, www.bearcave.com, 2001.
+ *

+ * This software is provided "as is", without any warrenty or claim as to its + * usefulness. Anyone who uses this source code uses it at their own risk. Nor + * is any support provided by Ian Kaplan and Bear Products International. + *

+ * Please send any bug fixes or suggested source changes to: + * + *

+ *      iank@bearcave.com
+ * 
+ * + * @author Ian Kaplan + */ +public abstract class LiftingSchemeBaseWavelet { + + /** "enumeration" for forward wavelet transform */ + protected final int forward = 1; + /** "enumeration" for inverse wavelet transform */ + protected final int inverse = 2; + + /** + * Split the vec into even and odd elements, where the even elements + * are in the first half of the vector and the odd elements are in the + * second half. + */ + protected void split(float[] vec, int N) { + + int start = 1; + int end = N - 1; + + while (start < end) { + for (int i = start; i < end; i = i + 2) { + float tmp = vec[i]; + vec[i] = vec[i + 1]; + vec[i + 1] = tmp; + } + start = start + 1; + end = end - 1; + } + } + + /** + * Merge the odd elements from the second half of the N element region in + * the array with the even elements in the first half of the N element + * region. The result will be the combination of the odd and even elements + * in a region of length N. + */ + protected void merge(float[] vec, int N) { + int half = N >> 1; + int start = half - 1; + int end = half; + + while (start > 0) { + for (int i = start; i < end; i = i + 2) { + float tmp = vec[i]; + vec[i] = vec[i + 1]; + vec[i + 1] = tmp; + } + start = start - 1; + end = end + 1; + } + } + + /** + * Predict step, to be defined by the subclass + * + * @param vec + * input array + * @param N + * size of region to act on (from 0..N-1) + * @param direction + * forward or inverse transform + */ + protected abstract void predict(float[] vec, int N, int direction); + + /** + * Update step, to be defined by the subclass + * + * @param vec + * input array + * @param N + * size of region to act on (from 0..N-1) + * @param direction + * forward or inverse transform + */ + protected abstract void update(float[] vec, int N, int direction); + + /** + *

+ * Simple wavelet Lifting Scheme forward transform + *

+ * + *

+ * forwardTrans is passed an array of doubles. The array size must be a + * power of two. Lifting Scheme wavelet transforms are calculated in-place + * and the result is returned in the argument array. + *

+ * + *

+ * The result of forwardTrans is a set of wavelet coefficients ordered by + * increasing frequency and an approximate average of the input data set in + * vec[0]. The coefficient bands follow this element in powers of two (e.g., + * 1, 2, 4, 8...). + *

+ * + * @param vec + * the vector + */ + public void forwardTrans(float[] vec) { + final int N = vec.length; + + for (int n = N; n > 1; n = n >> 1) { + split(vec, n); + predict(vec, n, forward); + update(vec, n, forward); + } + } // forwardTrans + + /** + *

+ * Default two step Lifting Scheme inverse wavelet transform + *

+ * + *

+ * inverseTrans is passed the result of an ordered wavelet transform, + * consisting of an average and a set of wavelet coefficients. The inverse + * transform is calculated in-place and the result is returned in the + * argument array. + *

+ * + * @param vec + * the vector + */ + public void inverseTrans(float[] vec) { + final int N = vec.length; + + for (int n = 2; n <= N; n = n << 1) { + update(vec, n, inverse); + predict(vec, n, inverse); + merge(vec, n); + } + } // inverseTrans + +} // LiftingSchemeBaseWavelet diff --git a/app/src/main/java/be/tarsos/dsp/wavelet/lift/LiftingSchemeTest.java b/app/src/main/java/be/tarsos/dsp/wavelet/lift/LiftingSchemeTest.java new file mode 100644 index 0000000..6417c6e --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/wavelet/lift/LiftingSchemeTest.java @@ -0,0 +1,111 @@ +package be.tarsos.dsp.wavelet.lift; + + +class LiftingSchemeTest { + + private static void print(float[] values) { + System.out.print("["); + for (double val : values) { + System.out.print(val); + System.out.print(","); + } + System.out.println("]"); + } + + public static void main(String[] args) { + + /* + * double vals[] = { 32.0, 10.0, 20.0, 38.0, 37.0, 28.0, 38.0, 34.0, + * 18.0, 24.0, 18.0, 9.0, 23.0, 24.0, 28.0, 34.0 }; + */ + float[] vals = { 25, 40, 8, 24, 48, 48, 40, 16 }; + /* + * double vals[] = { 77.6875, 78.1875, 82.0625, 85.5625, 86.7500, + * 82.4375, 82.2500, 82.7500, 81.2500, 79.5625, 80.2813, 79.8750, + * 77.7500, 74.7500, 78.5000, 79.1875, 78.8125, 80.3125, 80.1250, + * 79.3125, 83.7500, 89.8125, 87.7500, 91.1250, 94.4375, 92.7500, + * 98.0000, 97.1875, 99.4375, 101.7500, 108.5000, 109.0000, 105.2500, + * 105.5000, 110.0000, 107.0000, 107.2500, 103.3125, 102.8750, 102.4375, + * 102.0000, 101.3125, 97.4375, 100.5000, 107.7500, 110.2500, 114.3125, + * 111.2500, 114.8125, 112.6875, 109.4375, 108.0625, 104.5625, 103.2500, + * 110.5625, 110.7500, 116.3125, 123.6250, 120.9375, 121.6250, 127.6875, + * 126.0625, 126.3750, 124.3750 }; + */ + + HaarWavelet hr = new HaarWavelet(); + LineWavelet ln = new LineWavelet(); + Daubechies4Wavelet d = new Daubechies4Wavelet(); + + HaarWithPolynomialInterpolationWavelet hrpy = new HaarWithPolynomialInterpolationWavelet(); + PolynomialWavelets py = new PolynomialWavelets(); + + System.out.println("Data:"); + print(vals); + System.out.println(); + + System.out.println("HaarWavelet:"); + hr.forwardTrans(vals); + print(vals); + System.out.println(); + hr.inverseTrans(vals); + print(vals); + System.out.println(); + + System.out.println("Daubechies4Wavelet:"); + d.forwardTrans(vals); + print(vals); + System.out.println(); + d.inverseTrans(vals); + print(vals); + System.out.println(); + + System.out.println("Line:"); + ln.forwardTrans(vals); + print(vals); + System.out.println(); + ln.inverseTrans(vals); + print(vals); + System.out.println(); + + System.out + .println("HaarWavelet, extended with polynomial interpolation:"); + hrpy.forwardTrans(vals); + print(vals); + System.out.println(); + hrpy.inverseTrans(vals); + print(vals); + System.out.println(); + + System.out.println("Poly:"); + py.forwardTrans(vals); + print(vals); + System.out.println(); + py.inverseTrans(vals); + print(vals); + System.out.println(); + + float[] t = { 56, 40, 8, 24, 48, 48, 40, 16 }; + hr.forwardTransOne(t); + + float[] signal = { 56, 40, 8, 24, 48, 48, 40, 16 }; + dwtHaar(signal); + } + + private static void dwtHaar(float[] signal) { + float[] s = new float[signal.length]; + float[] d = new float[signal.length]; + for (int i = 0; i < signal.length / 2; i++) { + s[i] = (signal[2 * i] + signal[2 * i + 1]) / 2.0f; + d[i] = signal[2 * i] - s[i]; + } + print(s); + print(d); + } + + /* + * private static void decompose(float[] signal) { int length = + * signal.length; int steps = (int) 
Math.round(Math.log(length) / + * Math.log(2)); } + */ + +} diff --git a/app/src/main/java/be/tarsos/dsp/wavelet/lift/LineWavelet.java b/app/src/main/java/be/tarsos/dsp/wavelet/lift/LineWavelet.java new file mode 100644 index 0000000..f49e06a --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/wavelet/lift/LineWavelet.java @@ -0,0 +1,268 @@ +package be.tarsos.dsp.wavelet.lift; + +/** + *

+ * Line (with slope) wavelet + *

+ * + *

+ * The wavelet Lifting Scheme "LineWavelet" wavelet approximates the data set
+ * using a LineWavelet with slope (in contrast to the HaarWavelet wavelet,
+ * where a LineWavelet with zero slope is used to approximate the data).
+ *

+ * + *

+ * The predict stage of the LineWavelet wavelet "predicts" that an odd point + * will lie midway between its two neighboring even points. That is, that the + * odd point will lie on a LineWavelet between the two adjacent even points. The + * difference between this "prediction" and the actual odd value replaces the + * odd element. + *

+ * + *

+ * The update stage calculates the average of the odd and even element pairs,
+ * although the method is indirect, since the predict phase has overwritten the
+ * odd value.
+ *

+ * + * + * Copyright and Use + * + *

+ * You may use this source code without limitation and without fee as long as + * you include: + *

+ *
This software was written and is copyrighted by Ian Kaplan, Bear + * Products International, www.bearcave.com, 2001.
+ *

+ * This software is provided "as is", without any warrenty or claim as to its + * usefulness. Anyone who uses this source code uses it at their own risk. Nor + * is any support provided by Ian Kaplan and Bear Products International. + *

+ * Please send any bug fixes or suggested source changes to: + * + *

+ *      iank@bearcave.com
+ * 
+ * + * @author Ian Kaplan + */ +public class LineWavelet extends LiftingSchemeBaseWavelet { + + /** + *

+ * Calculate an extra "even" value for the LineWavelet wavelet algorithm at + * the end of the data series. Here we pretend that the last two values in + * the data series are at the x-axis coordinates 0 and 1, respectively. We + * then need to calculate the y-axis value at the x-axis coordinate 2. This + * point lies on a LineWavelet running through the points at 0 and 1. + *

+ *

+ * Given two points, (x1, y1) and (x2, y2), where
+ *
+ *         x1 = 0
+ *         x2 = 1
+ *
+ * calculate the point on the LineWavelet at (x3, y3), where
+ *
+ *         x3 = 2
+ *
+ * The "two-point equation" for a LineWavelet given (x1, y1) and (x2, y2) is
+ *
+ *      (y - y1) = ((y2 - y1) / (x2 - x1)) * (x - x1)
+ *
+ * Solving for y
+ *
+ *      y = ((y2 - y1) / (x2 - x1)) * (x - x1) + y1
+ *
+ * Since x1 = 0 and x2 = 1
+ *
+ *      y = ((y2 - y1) / (1 - 0)) * (x - 0) + y1
+ *
+ * or
+ *
+ *      y = (y2 - y1)*x + y1
+ *
+ * We're calculating the value at x3 = 2, so
+ *
+ *      y = 2*y2 - 2*y1 + y1
+ *
+ * or
+ *
+ *      y = 2*y2 - y1
+	 * 
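A quick numeric check of the result above: for the line through (0, 3) and (1, 5), which has slope 2, the extrapolated value at x = 2 should be 7.

float y1 = 3f, y2 = 5f;
float y3 = 2 * y2 - y1;   // 7.0f, the value new_y(y1, y2) returns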
+ */ + private float new_y(float y1, float y2) { + float y = 2 * y2 - y1; + return y; + } + + /** + *

+ * Predict phase of LineWavelet Lifting Scheme wavelet + *

+ * + *

+ * The predict step attempts to "predict" the value of an odd element from + * the even elements. The difference between the prediction and the actual + * element is stored as a wavelet coefficient. + *

+ *

+ * The "predict" step takes place after the split step. The split step will + * move the odd elements (b j ) to the second half of the array, + * leaving the even elements (a i ) in the first half + *

+ * + *
+ *     a0, a1, a2, a3, b0, b1, b2, b3
+	 * 
+ *

+ * The predict step of the LineWavelet wavelet "predicts" that the odd + * element will be on a LineWavelet between two even elements. + *

+ * + *
+ *     b(j+1, i) = b(j, i) - (a(j, i) + a(j, i+1)) / 2
+	 * 
+ *

+ * Note that when we get to the end of the data series the odd element is
+ * the last element in the data series (remember, wavelet algorithms work on
+ * data series with 2^n elements). Here we "predict" that the odd
+ * element will be on a LineWavelet that runs through the last two even
+ * elements. This can be calculated by assuming that the last two even
+ * elements are located at x-axis coordinates 0 and 1, respectively. The odd
+ * element will be at 2. The new_y() function is called to do this
+ * simple calculation.
+ *
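A standalone sketch of this predict step on an already-split buffer (even half {2, 4, 6, 9}, odd half {3, 5, 8, 12}; the values are made up):

static float[] linePredictSketch() {
    float[] vec = {2f, 4f, 6f, 9f, 3f, 5f, 8f, 12f};
    int half = 4;
    for (int i = 0; i < half; i++) {
        float predictVal = (i < half - 1)
                ? (vec[i] + vec[i + 1]) / 2    // midpoint of the two neighbouring even values
                : 2 * vec[i] - vec[i - 1];     // last odd element: extrapolate, as new_y() does
        vec[i + half] -= predictVal;           // forward direction: store the detail
    }
    return vec;   // {2, 4, 6, 9, 0, 0, 0.5, 0}: the odd half now holds the details
}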

+ */ + protected void predict(float[] vec, int N, int direction) { + int half = N >> 1; + float predictVal; + + for (int i = 0; i < half; i++) { + int j = i + half; + if (i < half - 1) { + predictVal = (vec[i] + vec[i + 1]) / 2; + } else if (N == 2) { + predictVal = vec[0]; + } else { + // calculate the last "odd" prediction + predictVal = new_y(vec[i - 1], vec[i]); + } + + if (direction == forward) { + vec[j] = vec[j] - predictVal; + } else if (direction == inverse) { + vec[j] = vec[j] + predictVal; + } else { + System.out.println("predictline::predict: bad direction value"); + } + } + } // predict + + /** + *

+ * The predict phase works on the odd elements in the second half of the + * array. The update phase works on the even elements in the first half of + * the array. The update phase attempts to preserve the average. After the + * update phase is completed the average of the even elements should be + * approximately the same as the average of the input data set from the + * previous iteration. The result of the update phase becomes the input for + * the next iteration. + *

+ *

+ * In a HaarWavelet wavelet the average that replaces the even element is + * calculated as the average of the even element and its associated odd + * element (e.g., its odd neighbor before the split). This is not possible + * in the LineWavelet wavelet since the odd element has been replaced by the + * difference between the odd element and the mid-point of its two even + * neighbors. As a result, the odd element cannot be recovered. + *

+ *

+ * The value that is added to the even element to preserve the average is + * calculated by the equation shown below. This equation is given in Wim + * Sweldens' journal articles and his tutorial (Building Your Own + * Wavelets at Home) and in Ripples in Mathematics. A somewhat + * more complete derivation of this equation is provided in Ripples in + * Mathematics by A. Jensen and A. la Cour-Harbo, Springer, 2001. + *

+ *

+ * The equation used to calculate the average is shown below for a given
+ * iteration i. Note that the predict phase has already completed, so
+ * the odd values belong to iteration i+1.
+ *

+ * + *
+ *   even(i+1, j) = even(i, j) op (odd(i+1, k-1) + odd(i+1, k)) / 4
+	 * 
+ *

+ * There is an edge problem here, when i = 0 and k = N/2 (e.g., there is no + * k-1 element). We assume that the odd i+1,k-1 is the same as + * odd k . So for the first element this becomes + * + *

+	 *       (2 * odd k )/4
+	 * 
+ *

+ * or + *

+ * + *
+	 *       odd k /2
+	 * 
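A standalone sketch of this update step, continuing with made-up detail values {0, 0, 0.5, 0} for an even half {2, 4, 6, 9}:

static float[] lineUpdateSketch() {
    float[] even = {2f, 4f, 6f, 9f};
    float[] detail = {0f, 0f, 0.5f, 0f};           // output of the predict step
    for (int i = 0; i < even.length; i++) {
        float val = (i == 0) ? detail[0] / 2.0f    // edge case: no detail to the left
                             : (detail[i - 1] + detail[i]) / 4.0f;
        even[i] += val;                            // forward adds; the inverse subtracts
    }
    return even;   // {2, 4, 6.125, 9.125}: these averages feed the next recursion level
}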
+ */ + protected void update(float[] vec, int N, int direction) { + int half = N >> 1; + + for (int i = 0; i < half; i++) { + int j = i + half; + float val; + + if (i == 0) { + val = vec[j] / 2.0f; + } else { + val = (vec[j - 1] + vec[j]) / 4.0f; + } + if (direction == forward) { + vec[i] = vec[i] + val; + } else if (direction == inverse) { + vec[i] = vec[i] - val; + } else { + System.out.println("update: bad direction value"); + } + } // for + } + +} // LineWavelet diff --git a/app/src/main/java/be/tarsos/dsp/wavelet/lift/PolynomialInterpolation.java b/app/src/main/java/be/tarsos/dsp/wavelet/lift/PolynomialInterpolation.java new file mode 100644 index 0000000..1a7f3f1 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/wavelet/lift/PolynomialInterpolation.java @@ -0,0 +1,189 @@ +package be.tarsos.dsp.wavelet.lift; + +/** +* +* @author Ian Kaplan +*/ +class PolynomialInterpolation { + /** number of polynomial interpolation ponts */ + private final static int numPts = 4; + + /** Table for 4-point interpolation coefficients */ + private final float[][] fourPointTable; + + /** Table for 2-point interpolation coefficients */ + private final float[][] twoPointTable; + + /** + *

+ * The polynomial interpolation algorithm assumes that the known points are
+ * located at x-coordinates 0, 1, ..., N-1. An interpolated point is calculated
+ * at x, using N coefficients. The polynomial coefficients for
+ * the point x can be calculated statically, using the Lagrange
+ * method.
+ *
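A standalone sketch of the Lagrange calculation (not using the private method below): coefficients for x = 1.5 over known points at x = 0, 1, 2, 3, applied to y = x^3. Four-point polynomial interpolation reproduces cubics exactly, so the result is 1.5^3 = 3.375.

public final class LagrangeDemo {
    public static void main(String[] args) {
        float x = 1.5f;
        int n = 4;
        float[] c = new float[n];
        for (int i = 0; i < n; i++) {
            float num = 1f, denom = 1f;
            for (int k = 0; k < n; k++) {
                if (i != k) {
                    num *= (x - k);      // product of (x - x_k) over the other points
                    denom *= (i - k);    // product of (x_i - x_k) over the other points
                }
            }
            c[i] = num / denom;
        }
        float[] d = {0f, 1f, 8f, 27f};   // y = x^3 sampled at x = 0, 1, 2, 3
        float y = c[0] * d[0] + c[1] * d[1] + c[2] * d[2] + c[3] * d[3];
        System.out.println(y);           // 3.375
    }
}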

+ * + * @param x + * the x-coordinate of the interpolated point + * @param N + * the number of polynomial points. + * @param c + * an array for returning the coefficients + */ + private void lagrange(float x, int N, float[] c) { + float num, denom; + + for (int i = 0; i < N; i++) { + num = 1; + denom = 1; + for (int k = 0; k < N; k++) { + if (i != k) { + num = num * (x - k); + denom = denom * (i - k); + } + } // for k + c[i] = num / denom; + } // for i + } // lagrange + + /** + *

+ * For a given N-point polynomial interpolation, fill the coefficient table, + * for points 0.5 ... (N-0.5). + *

+ */ + private void fillTable(int N, float[][] table) { + float x; + float n = N; + int i = 0; + + for (x = 0.5f; x < n; x = x + 1.0f) { + lagrange(x, N, table[i]); + i++; + } + } // fillTable + + /** + *

+ * PolynomialWavelets constructor + *

+ *

+ * Build the 4-point and 2-point polynomial coefficient tables. + *

+ */ + public PolynomialInterpolation() { + + // Fill in the 4-point polynomial interplation table + // for the points 0.5, 1.5, 2.5, 3.5 + fourPointTable = new float[numPts][numPts]; + + fillTable(numPts, fourPointTable); + + // Fill in the 2-point polynomial interpolation table + // for 0.5 and 1.5 + twoPointTable = new float[2][2]; + + fillTable(2, twoPointTable); + } // PolynomialWavelets constructor + + /** + * Print an N x N table polynomial coefficient table + */ + private void printTable(float[][] table, int N) { + System.out.println(N + "-point interpolation table:"); + double x = 0.5; + for (int i = 0; i < N; i++) { + System.out.print(x + ": "); + for (int j = 0; j < N; j++) { + System.out.print(table[i][j]); + if (j < N - 1) + System.out.print(", "); + } + System.out.println(); + x = x + 1.0; + } + } + + /** + * Print the 4-point and 2-point polynomial coefficient tables. + */ + public void printTables() { + printTable(fourPointTable, numPts); + printTable(twoPointTable, 2); + } // printTables + + /** + *

+ * For the polynomial interpolation point x-coordinate x, + * return the associated polynomial interpolation coefficients. + *

+ * + * @param x + * the x-coordinate for the interpolated pont + * @param n + * the number of polynomial interpolation points + * @param c + * an array to return the polynomial coefficients + */ + private void getCoef(float x, int n, float[] c) { + float[][] table = null; + + int j = (int) x; + if (j < 0 || j >= n) { + System.out.println("PolynomialWavelets::getCoef: n = " + n + + ", bad x value"); + } + + if (n == numPts) { + table = fourPointTable; + } else if (n == 2) { + table = twoPointTable; + c[2] = 0.0f; + c[3] = 0.0f; + } else { + System.out.println("PolynomialWavelets::getCoef: bad value for N"); + } + + if (table != null) { + System.arraycopy(table[j], 0, c, 0, n); + } + } // getCoef + + /** + *

+ * Given four points at the x,y coordinates {0,d0}, + * {1,d1}, {2,d2}, {3,d3} return the + * y-coordinate value for the polynomial interpolated point at + * x. + *

+ * + * @param x + * the x-coordinate for the point to be interpolated + * @param N + * the number of interpolation points + * @param d + * an array containing the y-coordinate values for the known + * points (which are located at x-coordinates 0..N-1). + * @return the y-coordinate value for the polynomial interpolated point at + * x. + */ + public float interpPoint(float x, int N, float[] d) { + float[] c = new float[numPts]; + float point = 0; + + int n = numPts; + if (N < numPts) + n = N; + + getCoef(x, n, c); + + if (n == numPts) { + point = c[0] * d[0] + c[1] * d[1] + c[2] * d[2] + c[3] * d[3]; + } else if (n == 2) { + point = c[0] * d[0] + c[1] * d[1]; + } + + return point; + } // interpPoint + +} // PolynomialInterpolation diff --git a/app/src/main/java/be/tarsos/dsp/wavelet/lift/PolynomialWavelets.java b/app/src/main/java/be/tarsos/dsp/wavelet/lift/PolynomialWavelets.java new file mode 100644 index 0000000..57bad36 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/wavelet/lift/PolynomialWavelets.java @@ -0,0 +1,263 @@ +package be.tarsos.dsp.wavelet.lift; + +/** + *

+ * Polynomial wavelets + *

+ *

+ * This wavelet transform uses a polynomial interpolation wavelet (e.g., the + * function used to calculate the differences). A HaarWavelet scaling function + * (the calculation of the average for the even points) is used. + *

+ *

+ * This wavelet transform uses a two-stage version of the lifting scheme. In the
+ * "classic" two-stage Lifting Scheme wavelet the predict stage precedes the
+ * update stage. Also, the algorithm is absolutely symmetric, with only the
+ * operators (usually addition and subtraction) interchanged.
+ *

+ *

+ * The problem with the classic Lifting Scheme transform is that it can be + * difficult to determine how to calculate the smoothing (scaling) function in + * the update phase once the predict stage has altered the odd values. This + * version of the wavelet transform calculates the update stage first and then + * calculates the predict stage from the modified update values. In this case + * the predict stage uses 4-point polynomial interpolation using even values + * that result from the update stage. + *

+ * + *

+ * In this version of the wavelet transform the update stage is no longer
+ * perfectly symmetric, since the forward and inverse transform equations differ
+ * by more than an addition or subtraction operator. However, this version of
+ * the transform produces a better result than the HaarWavelet transform
+ * extended with a polynomial interpolation stage.
+ *

+ * + *

+ * This algorithm was suggested to me from my reading of Wim Sweldens' tutorial + * Building Your Own Wavelets at Home. + *

+ * + * + * + *
+ *   
+ *   http://www.bearcave.com/misl/misl_tech/wavelets/lifting/index.html
+ * 
+ * + * + * Copyright and Use + * + *

+ * You may use this source code without limitation and without fee as long as + * you include: + *

+ *
This software was written and is copyrighted by Ian Kaplan, Bear + * Products International, www.bearcave.com, 2001.
+ *

+ * This software is provided "as is", without any warrenty or claim as to its + * usefulness. Anyone who uses this source code uses it at their own risk. Nor + * is any support provided by Ian Kaplan and Bear Products International. + *

+ * Please send any bug fixes or suggested source changes to: + * + *

+ *      iank@bearcave.com
+ * 
+ * + * @author Ian Kaplan + */ +public class PolynomialWavelets extends LiftingSchemeBaseWavelet { + final static int numPts = 4; + private final PolynomialInterpolation fourPt; + + /** + * PolynomialWavelets class constructor + */ + public PolynomialWavelets() { + fourPt = new PolynomialInterpolation(); + } + + /** + *

+ * Copy four points or N (whichever is less) data points from
+ * vec into d. These points are the "known" points used in the
+ * polynomial interpolation.
+ *

+ * + * @param vec + * the input data set on which the wavelet is calculated + * @param d + * an array into which N data points, starting at + * start are copied. + * @param N + * the number of polynomial interpolation points + * @param start + * the index in vec from which copying starts + */ + private void fill(float[] vec, float[] d, int N, int start) { + int n = numPts; + if (n > N) + n = N; + int end = start + n; + int j = 0; + + for (int i = start; i < end; i++) { + d[j] = vec[i]; + j++; + } + } // fill + + /** + *

+ * The update stage calculates the forward and inverse HaarWavelet scaling + * functions. The forward HaarWavelet scaling function is simply the average + * of the even and odd elements. The inverse function is found by simple + * algebraic manipulation, solving for the even element given the average + * and the odd element. + *

+ *

+ * In this version of the wavelet transform the update stage precedes the
+ * predict stage in the forward transform. In the inverse transform the
+ * predict stage precedes the update stage, reversing the calculation on the
+ * odd elements.
+ *
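A quick arithmetic check of that pairing: the forward update stores the pair average in the even slot, and the inverse recovers the even value from the average and the odd value (which the inverse predict stage has already restored).

float even = 10f, odd = 14f;
float avg = (even + odd) / 2;      // forward update: 12
float restored = 2 * avg - odd;    // inverse update: 2*12 - 14 = 10, the original even value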

+ */ + protected void update(float[] vec, int N, int direction) { + int half = N >> 1; + + for (int i = 0; i < half; i++) { + int j = i + half; + // double updateVal = vec[j] / 2.0; + + if (direction == forward) { + vec[i] = (vec[i] + vec[j]) / 2; + } else if (direction == inverse) { + vec[i] = (2 * vec[i]) - vec[j]; + } else { + System.out.println("update: bad direction value"); + } + } + } + + /** + *

+ * Predict an odd point from the even points, using 4-point polynomial + * interpolation. + *

+ *

+ * The four points used in the polynomial interpolation are the even points. + * We pretend that these four points are located at the x-coordinates + * 0,1,2,3. The first odd point interpolated will be located between the + * first and second even point, at 0.5. The next N-3 points are located at + * 1.5 (in the middle of the four points). The last two points are located + * at 2.5 and 3.5. For complete documentation see + *

+ * + *
+	 *   
+	 *   http://www.bearcave.com/misl/misl_tech/wavelets/lifting/index.html
+	 * 
+ * + *

+ * The difference between the predicted (interpolated) value and the actual + * odd value replaces the odd value in the forward transform. + *

+ * + *

+ * As the recursive steps proceed, N will eventually be 4 and then 2. When N + * = 4, linear interpolation is used. When N = 2, HaarWavelet interpolation + * is used (the prediction for the odd value is that it is equal to the even + * value). + *

+ * + * @param vec + * the input data on which the forward or inverse transform is + * calculated. + * @param N + * the area of vec over which the transform is calculated + * @param direction + * forward or inverse transform + */ + protected void predict(float[] vec, int N, int direction) { + int half = N >> 1; + float[] d = new float[numPts]; + + // int k = 42; + + for (int i = 0; i < half; i++) { + float predictVal; + + if (i == 0) { + if (half == 1) { + // e.g., N == 2, and we use HaarWavelet interpolation + predictVal = vec[0]; + } else { + fill(vec, d, N, 0); + predictVal = fourPt.interpPoint(0.5f, half, d); + } + } else if (i == 1) { + predictVal = fourPt.interpPoint(1.5f, half, d); + } else if (i == half - 2) { + predictVal = fourPt.interpPoint(2.5f, half, d); + } else if (i == half - 1) { + predictVal = fourPt.interpPoint(3.5f, half, d); + } else { + fill(vec, d, N, i - 1); + predictVal = fourPt.interpPoint(1.5f, half, d); + } + + int j = i + half; + if (direction == forward) { + vec[j] = vec[j] - predictVal; + } else if (direction == inverse) { + vec[j] = vec[j] + predictVal; + } else { + System.out + .println("PolynomialWavelets::predict: bad direction value"); + } + } + } // predict + + /** + *

+ * Polynomial wavelet lifting scheme transform. + *

+ *

+ * This version of the forwardTrans function overrides the function in the
+ * LiftingSchemeBaseWavelet base class. Here the update stage runs first and the
+ * 4-point polynomial interpolation predict stage runs at the end of each step.
+ *

+ */ + public void forwardTrans(float[] vec) { + final int N = vec.length; + + for (int n = N; n > 1; n = n >> 1) { + split(vec, n); + update(vec, n, forward); + predict(vec, n, forward); + } // for + } // forwardTrans + + /** + *

+ * Polynomial wavelet lifting Scheme inverse transform. + *

+ *

+ * This version of the inverseTrans function overrides the function in the
+ * LiftingSchemeBaseWavelet base class. Here the polynomial interpolation predict
+ * stage is inverted first, followed by the update stage, before the merge.
+ *

+ */ + public void inverseTrans(float[] vec) { + final int N = vec.length; + + for (int n = 2; n <= N; n = n << 1) { + predict(vec, n, inverse); + update(vec, n, inverse); + merge(vec, n); + } + } // inverseTrans + +} // PolynomialWavelets + diff --git a/app/src/main/java/be/tarsos/dsp/wavelet/lift/package-info.java b/app/src/main/java/be/tarsos/dsp/wavelet/lift/package-info.java new file mode 100644 index 0000000..4afa4f5 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/wavelet/lift/package-info.java @@ -0,0 +1,28 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +/** + * Wavelet transforms using the lifting scheme algorithm. Implementation by Ian Kaplan + */ +package be.tarsos.dsp.wavelet.lift; diff --git a/app/src/main/java/be/tarsos/dsp/wavelet/package-info.java b/app/src/main/java/be/tarsos/dsp/wavelet/package-info.java new file mode 100644 index 0000000..0a62b30 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/wavelet/package-info.java @@ -0,0 +1,28 @@ +/* +* _______ _____ _____ _____ +* |__ __| | __ \ / ____| __ \ +* | | __ _ _ __ ___ ___ ___| | | | (___ | |__) | +* | |/ _` | '__/ __|/ _ \/ __| | | |\___ \| ___/ +* | | (_| | | \__ \ (_) \__ \ |__| |____) | | +* |_|\__,_|_| |___/\___/|___/_____/|_____/|_| +* +* ------------------------------------------------------------- +* +* TarsosDSP is developed by Joren Six at IPEM, University Ghent +* +* ------------------------------------------------------------- +* +* Info: http://0110.be/tag/TarsosDSP +* Github: https://github.com/JorenSix/TarsosDSP +* Releases: http://0110.be/releases/TarsosDSP/ +* +* TarsosDSP includes modified source code by various authors, +* for credits and info, see README. +* +*/ + + +/** + * HaarWavelet wavelet calculation and transform. + */ +package be.tarsos.dsp.wavelet; diff --git a/app/src/main/java/be/tarsos/dsp/writer/WaveHeader.java b/app/src/main/java/be/tarsos/dsp/writer/WaveHeader.java new file mode 100644 index 0000000..d44eb77 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/writer/WaveHeader.java @@ -0,0 +1,260 @@ +package be.tarsos.dsp.writer; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; + +/** + * this source code is copied from : https://android.googlesource.com/platform/frameworks/base.git/+/android-4.3_r2/core/java/android/speech/srec/WaveHeader.java + */ +/** + * This class represents the header of a WAVE format audio file, which usually + * have a .wav suffix. The following integer valued fields are contained: + *
+ *   - format - usually PCM, ALAW or ULAW.
+ *   - numChannels - 1 for mono, 2 for stereo.
+ *   - sampleRate - usually 8000, 11025, 16000, 22050, or 44100 Hz.
+ *   - bitsPerSample - usually 16 for PCM, 8 for ALAW, or 8 for ULAW.
+ *   - numBytes - size of audio data after this header, in bytes.
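A usage sketch, assuming the WaveHeader class below: build and serialize a header for one second of 16-bit mono PCM at 44100 Hz (this is essentially what WriterProcessor does when recording finishes). The class name WaveHeaderDemo is for illustration only.

import java.io.ByteArrayOutputStream;
import be.tarsos.dsp.writer.WaveHeader;

public final class WaveHeaderDemo {
    public static void main(String[] args) throws Exception {
        int sampleRate = 44100;
        short channels = 1;
        short bitsPerSample = 16;
        int numBytes = sampleRate * channels * bitsPerSample / 8;   // one second of audio data
        WaveHeader header =
                new WaveHeader(WaveHeader.FORMAT_PCM, channels, sampleRate, bitsPerSample, numBytes);
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        int written = header.write(out);   // writes the 44-byte RIFF/WAVE + fmt + data header
        System.out.println(written + " bytes: " + header);
    }
}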
+ * + * Not yet ready to be supported, so + */ +public class WaveHeader { + + // follows WAVE format in http://ccrma.stanford.edu/courses/422/projects/WaveFormat + + private static final int HEADER_LENGTH = 44; + + /** Indicates PCM format. */ + public static final short FORMAT_PCM = 1; + /** Indicates ALAW format. */ + public static final short FORMAT_ALAW = 6; + /** Indicates ULAW format. */ + public static final short FORMAT_ULAW = 7; + + private short mFormat; + private short mNumChannels; + private int mSampleRate; + private short mBitsPerSample; + private int mNumBytes; + + /** + * Construct a WaveHeader, with all fields defaulting to zero. + */ + public WaveHeader() { + } + + /** + * Construct a WaveHeader, with fields initialized. + * @param format format of audio data, + * one of {@link #FORMAT_PCM}, {@link #FORMAT_ULAW}, or {@link #FORMAT_ALAW}. + * @param numChannels 1 for mono, 2 for stereo. + * @param sampleRate typically 8000, 11025, 16000, 22050, or 44100 hz. + * @param bitsPerSample usually 16 for PCM, 8 for ULAW or 8 for ALAW. + * @param numBytes size of audio data after this header, in bytes. + */ + public WaveHeader(short format, short numChannels, int sampleRate, short bitsPerSample, int numBytes) { + mFormat = format; + mSampleRate = sampleRate; + mNumChannels = numChannels; + mBitsPerSample = bitsPerSample; + mNumBytes = numBytes; + } + + /** + * Get the format field. + * @return format field, + * one of {@link #FORMAT_PCM}, {@link #FORMAT_ULAW}, or {@link #FORMAT_ALAW}. + */ + public short getFormat() { + return mFormat; + } + + /** + * Set the format field. + * @param format + * one of {@link #FORMAT_PCM}, {@link #FORMAT_ULAW}, or {@link #FORMAT_ALAW}. + * @return reference to this WaveHeader instance. + */ + public WaveHeader setFormat(short format) { + mFormat = format; + return this; + } + + /** + * Get the number of channels. + * @return number of channels, 1 for mono, 2 for stereo. + */ + public short getNumChannels() { + return mNumChannels; + } + + /** + * Set the number of channels. + * @param numChannels 1 for mono, 2 for stereo. + * @return reference to this WaveHeader instance. + */ + public WaveHeader setNumChannels(short numChannels) { + mNumChannels = numChannels; + return this; + } + + /** + * Get the sample rate. + * @return sample rate, typically 8000, 11025, 16000, 22050, or 44100 hz. + */ + public int getSampleRate() { + return mSampleRate; + } + + /** + * Set the sample rate. + * @param sampleRate sample rate, typically 8000, 11025, 16000, 22050, or 44100 hz. + * @return reference to this WaveHeader instance. + */ + public WaveHeader setSampleRate(int sampleRate) { + mSampleRate = sampleRate; + return this; + } + + /** + * Get the number of bits per sample. + * @return number of bits per sample, + * usually 16 for PCM, 8 for ULAW or 8 for ALAW. + */ + public short getBitsPerSample() { + return mBitsPerSample; + } + + /** + * Set the number of bits per sample. + * @param bitsPerSample number of bits per sample, + * usually 16 for PCM, 8 for ULAW or 8 for ALAW. + * @return reference to this WaveHeader instance. + */ + public WaveHeader setBitsPerSample(short bitsPerSample) { + mBitsPerSample = bitsPerSample; + return this; + } + + /** + * Get the size of audio data after this header, in bytes. + * @return size of audio data after this header, in bytes. + */ + public int getNumBytes() { + return mNumBytes; + } + + /** + * Set the size of audio data after this header, in bytes. + * @param numBytes size of audio data after this header, in bytes. 
+ * @return reference to this WaveHeader instance. + */ + public WaveHeader setNumBytes(int numBytes) { + mNumBytes = numBytes; + return this; + } + + /** + * Read and initialize a WaveHeader. + * @param in {@link java.io.InputStream} to read from. + * @return number of bytes consumed. + * @throws IOException + */ + public int read(InputStream in) throws IOException { + /* RIFF header */ + readId(in, "RIFF"); + + readId(in, "WAVE"); + + /* fmt chunk */ + readId(in, "fmt "); + if (16 != readInt(in)) throw new IOException("fmt chunk length not 16"); + mFormat = readShort(in); + mNumChannels = readShort(in); + mSampleRate = readInt(in); + int byteRate = readInt(in); + short blockAlign = readShort(in); + mBitsPerSample = readShort(in); + if (byteRate != mNumChannels * mSampleRate * mBitsPerSample / 8) { + throw new IOException("fmt.ByteRate field inconsistent"); + } + if (blockAlign != mNumChannels * mBitsPerSample / 8) { + throw new IOException("fmt.BlockAlign field inconsistent"); + } + + /* data chunk */ + readId(in, "data"); + mNumBytes = readInt(in); + + return HEADER_LENGTH; + } + + private static void readId(InputStream in, String id) throws IOException { + for (int i = 0; i < id.length(); i++) { + if (id.charAt(i) != in.read()) throw new IOException( id + " tag not present"); + } + } + + private static int readInt(InputStream in) throws IOException { + return in.read() | (in.read() << 8) | (in.read() << 16) | (in.read() << 24); + } + + private static short readShort(InputStream in) throws IOException { + return (short)(in.read() | (in.read() << 8)); + } + + /** + * Write a WAVE file header. + * @param out {@link java.io.OutputStream} to receive the header. + * @return number of bytes written. + * @throws IOException + */ + public int write(OutputStream out) throws IOException { + /* RIFF header */ + writeId(out, "RIFF"); + writeInt(out, 36 + mNumBytes); + writeId(out, "WAVE"); + + /* fmt chunk */ + writeId(out, "fmt "); + writeInt(out, 16); + writeShort(out, mFormat); + writeShort(out, mNumChannels); + writeInt(out, mSampleRate); + writeInt(out, mNumChannels * mSampleRate * mBitsPerSample / 8); + writeShort(out, (short)(mNumChannels * mBitsPerSample / 8)); + writeShort(out, mBitsPerSample); + + /* data chunk */ + writeId(out, "data"); + writeInt(out, mNumBytes); + + return HEADER_LENGTH; + } + + private static void writeId(OutputStream out, String id) throws IOException { + for (int i = 0; i < id.length(); i++) out.write(id.charAt(i)); + } + + private static void writeInt(OutputStream out, int val) throws IOException { + out.write(val >> 0); + out.write(val >> 8); + out.write(val >> 16); + out.write(val >> 24); + } + + private static void writeShort(OutputStream out, short val) throws IOException { + out.write(val >> 0); + out.write(val >> 8); + } + + @Override + public String toString() { + return String.format( + "WaveHeader format=%d numChannels=%d sampleRate=%d bitsPerSample=%d numBytes=%d", + mFormat, mNumChannels, mSampleRate, mBitsPerSample, mNumBytes); + } + +} \ No newline at end of file diff --git a/app/src/main/java/be/tarsos/dsp/writer/WriterProcessor.java b/app/src/main/java/be/tarsos/dsp/writer/WriterProcessor.java new file mode 100644 index 0000000..4e43637 --- /dev/null +++ b/app/src/main/java/be/tarsos/dsp/writer/WriterProcessor.java @@ -0,0 +1,65 @@ +package be.tarsos.dsp.writer; + + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.RandomAccessFile; + +import be.tarsos.dsp.AudioEvent; +import be.tarsos.dsp.AudioProcessor; +import 
be.tarsos.dsp.io.TarsosDSPAudioFormat; + +/** + * This class writes the ongoing sound to an output specified by the programmer + * + */ +public class WriterProcessor implements AudioProcessor { + RandomAccessFile output; + TarsosDSPAudioFormat audioFormat; + private int audioLen=0; + private static final int HEADER_LENGTH=44;//byte + + /** + * + * @param audioFormat which this processor is attached to + * @param output randomaccessfile of the output file + */ + public WriterProcessor(TarsosDSPAudioFormat audioFormat,RandomAccessFile output){ + this.output=output; + this.audioFormat=audioFormat; + try { + output.write(new byte[HEADER_LENGTH]); + } catch (IOException e) { + e.printStackTrace(); + } + } + @Override + public boolean process(AudioEvent audioEvent) { + try { + audioLen+=audioEvent.getByteBuffer().length; + //write audio to the output + output.write(audioEvent.getByteBuffer()); + } catch (IOException e) { + e.printStackTrace(); + } + return true; + } + + @Override + public void processingFinished() { + //write header and data to the result output + WaveHeader waveHeader=new WaveHeader(WaveHeader.FORMAT_PCM, + (short)audioFormat.getChannels(), + (int)audioFormat.getSampleRate(),(short)16,audioLen);//16 is for pcm, Read WaveHeader class for more details + ByteArrayOutputStream header=new ByteArrayOutputStream(); + try { + waveHeader.write(header); + output.seek(0); + output.write(header.toByteArray()); + output.close(); + }catch (IOException e){ + e.printStackTrace(); + } + + } +} diff --git a/app/src/main/java/com/ztftrue/music/ErrorTipActivity.kt b/app/src/main/java/com/ztftrue/music/ErrorTipActivity.kt new file mode 100644 index 0000000..a4225a0 --- /dev/null +++ b/app/src/main/java/com/ztftrue/music/ErrorTipActivity.kt @@ -0,0 +1,156 @@ +package com.ztftrue.music + +import android.content.Intent +import android.net.Uri +import android.os.Bundle +import android.util.Log +import androidx.activity.ComponentActivity +import androidx.activity.compose.BackHandler +import androidx.activity.compose.setContent +import androidx.activity.viewModels +import androidx.compose.foundation.background +import androidx.compose.foundation.layout.Column +import androidx.compose.foundation.layout.Row +import androidx.compose.foundation.layout.fillMaxSize +import androidx.compose.foundation.layout.fillMaxWidth +import androidx.compose.foundation.layout.height +import androidx.compose.foundation.layout.padding +import androidx.compose.foundation.lazy.LazyColumn +import androidx.compose.material3.Button +import androidx.compose.material3.HorizontalDivider +import androidx.compose.material3.MaterialTheme +import androidx.compose.material3.Scaffold +import androidx.compose.material3.Surface +import androidx.compose.material3.Text +import androidx.compose.runtime.Composable +import androidx.compose.runtime.LaunchedEffect +import androidx.compose.runtime.getValue +import androidx.compose.runtime.mutableStateOf +import androidx.compose.runtime.remember +import androidx.compose.runtime.setValue +import androidx.compose.ui.Modifier +import androidx.compose.ui.res.stringResource +import androidx.compose.ui.semantics.contentDescription +import androidx.compose.ui.semantics.semantics +import androidx.compose.ui.unit.dp +import androidx.media3.common.util.UnstableApi +import com.ztftrue.music.ui.theme.MusicPitchTheme + + +class ErrorTipActivity : ComponentActivity() { + private val musicViewModel: MusicViewModel by viewModels() + + override fun onCreate(savedInstanceState: Bundle?) 
{ + super.onCreate(savedInstanceState) + setContent { + MusicPitchTheme(musicViewModel) { + ErrorCollectorView(this@ErrorTipActivity) + } + } + } + + @androidx.annotation.OptIn(UnstableApi::class) + @Composable + fun ErrorCollectorView( + activity: ErrorTipActivity + ) { + var errorMessage by remember { mutableStateOf("") } + LaunchedEffect(Unit) { + try { + val error: Throwable = intent.getSerializableExtra("error") as Throwable + val sb = StringBuilder("") + for (element in error.stackTrace) { + sb.append(element.toString()) + sb.append("\n") + } + errorMessage = sb.toString() + } catch (e: Exception) { + Log.e("ERROR", e.toString()) + } + } + BackHandler(enabled = true) { + finish() + } + Surface( + modifier = Modifier + .fillMaxSize() + .semantics { contentDescription = "MainView" }, + color = MaterialTheme.colorScheme.background + ) { + + Scaffold(modifier = Modifier, + topBar = { + Column { + Text( + text = stringResource(R.string.error_tip), + color = MaterialTheme.colorScheme.onBackground + ) + HorizontalDivider( + modifier = Modifier + .fillMaxWidth() + .height(1.dp) + .background(color = MaterialTheme.colorScheme.onBackground) + ) + } + }, bottomBar = { + Row { + Button( + onClick = { + val intent = Intent(Intent.ACTION_SEND) + intent.setType("text/plain") + intent.putExtra(Intent.EXTRA_SUBJECT, "Crash report") + intent.putExtra(Intent.EXTRA_TEXT, errorMessage) + startActivity( + Intent.createChooser( + intent, + "Copy" + ) + ) + activity.startActivity(intent) + } + ) { + Text( + text = stringResource(R.string.feedback) + ) + } + Button( + onClick = { + val intent = Intent(Intent.ACTION_SENDTO).apply { + data = + Uri.parse("mailto:") // only email apps should handle this + putExtra(Intent.EXTRA_EMAIL, arrayOf("ztftrue@gmail.com")) + putExtra(Intent.EXTRA_SUBJECT, "Crash report") + putExtra(Intent.EXTRA_TEXT, errorMessage) + } + activity.startActivity(intent) + } + ) { + Text( + text = stringResource(R.string.send_email) + ) + } + } + + }, content = { + LazyColumn( + modifier = Modifier + .fillMaxSize() + .padding(it) + ) { + item(1) { + Text( + text = stringResource(R.string.sorry_some_error_happens_you_can_feedback_it_with_this_message), + color = MaterialTheme.colorScheme.onBackground + ) + Text( + text = errorMessage, + color = MaterialTheme.colorScheme.onBackground + ) + } + } + }) + } + + } + +} \ No newline at end of file diff --git a/app/src/main/java/com/ztftrue/music/MainActivity.kt b/app/src/main/java/com/ztftrue/music/MainActivity.kt index 36b76c6..ddf735f 100644 --- a/app/src/main/java/com/ztftrue/music/MainActivity.kt +++ b/app/src/main/java/com/ztftrue/music/MainActivity.kt @@ -20,7 +20,6 @@ import android.support.v4.media.MediaMetadataCompat import android.support.v4.media.session.MediaControllerCompat import android.support.v4.media.session.MediaSessionCompat import android.support.v4.media.session.PlaybackStateCompat -import android.util.Log import androidx.activity.ComponentActivity import androidx.activity.compose.setContent import androidx.activity.result.ActivityResultLauncher @@ -37,6 +36,7 @@ import androidx.compose.material3.Text import androidx.compose.ui.Alignment import androidx.compose.ui.Modifier import androidx.compose.ui.graphics.Color +import androidx.compose.ui.res.stringResource import androidx.compose.ui.unit.sp import androidx.core.app.ActivityCompat import androidx.core.splashscreen.SplashScreen @@ -276,13 +276,13 @@ class MainActivity : ComponentActivity() { ActivityResultContracts.StartActivityForResult() ) { result -> if (result.resultCode 
== RESULT_OK) { - val treeUri = result.data?.data; + val treeUri = result.data?.data if (treeUri != null) { contentResolver.takePersistableUriPermission( treeUri, Intent.FLAG_GRANT_READ_URI_PERMISSION or Intent.FLAG_GRANT_WRITE_URI_PERMISSION ) - CoroutineScope(Dispatchers.IO).launch{ + CoroutineScope(Dispatchers.IO).launch { musicViewModel.getDb(this@MainActivity).StorageFolderDao().insert( StorageFolder(null, treeUri.toString()) ) @@ -352,7 +352,7 @@ class MainActivity : ComponentActivity() { verticalArrangement = Arrangement.Center ) { Text( - text = "Can't find any audio file\n I need permission\nClick here to open settings", + text = stringResource(R.string.can_t_find_any_audio_file_i_need_permission_click_here_to_open_settings), fontSize = 20.sp, color = Color.Red ) @@ -493,7 +493,7 @@ class MainActivity : ComponentActivity() { // before switch to another music, must clear lyrics musicViewModel.currentCaptionList.clear() val index = it.getInt("index") - if (index >= 0 && musicViewModel.musicQueue.size > index) { + if (index >= 0 && musicViewModel.musicQueue.size > index && index != musicViewModel.currentPlayQueueIndex.intValue) { musicViewModel.currentMusicCover.value = null musicViewModel.currentPlay.value = musicViewModel.musicQueue[index] @@ -504,7 +504,6 @@ class MainActivity : ComponentActivity() { this@MainActivity, musicViewModel.musicQueue[index] ) - } } else if (it.getInt("type") == EVENT_MEDIA_METADATA_Change) { val cover = it.getByteArray("cover") @@ -602,7 +601,6 @@ class MainActivity : ComponentActivity() { musicViewModel.musicQueue.addAll(it) } resultData.getParcelableArrayList("songsList")?.also { - Log.i("TAG", "getInitData: ${it.size}") musicViewModel.songsList.clear() musicViewModel.songsList.addAll(it) } diff --git a/app/src/main/java/com/ztftrue/music/MusicViewModel.kt b/app/src/main/java/com/ztftrue/music/MusicViewModel.kt index 33b8bce..48aaac5 100644 --- a/app/src/main/java/com/ztftrue/music/MusicViewModel.kt +++ b/app/src/main/java/com/ztftrue/music/MusicViewModel.kt @@ -9,6 +9,7 @@ import android.media.MediaMetadataRetriever import android.net.Uri import android.support.v4.media.MediaBrowserCompat import android.support.v4.media.session.MediaControllerCompat +import android.util.Log import androidx.compose.runtime.mutableFloatStateOf import androidx.compose.runtime.mutableIntStateOf import androidx.compose.runtime.mutableLongStateOf @@ -37,6 +38,7 @@ import com.ztftrue.music.utils.model.EqualizerBand import com.ztftrue.music.utils.model.ListStringCaption import kotlinx.coroutines.CoroutineScope import kotlinx.coroutines.Dispatchers +import kotlinx.coroutines.Job import kotlinx.coroutines.launch import java.io.BufferedReader import java.io.File @@ -151,130 +153,148 @@ class MusicViewModel : ViewModel() { } return db!! } - + var lyricsJob: Job? 
=null fun dealLyrics(context: Context, currentPlay: MusicItem) { currentCaptionList.clear() - val regexPattern = Regex("[<>\"/~'{}?,+=)(^&*%!@#\$]") - val artistsFolder = currentPlay.artist.replace( - regexPattern, - "_" - ) - val folderPath = "$Lyrics/$artistsFolder" - val folder = context.getExternalFilesDir( - folderPath - ) - folder?.mkdirs() - val id = currentPlay.name.replace(regexPattern, "_") - val path = "${context.getExternalFilesDir(folderPath)?.absolutePath}/$id" - val text = File("$path.txt") - val firstEmbeddedLyrics = - context.getSharedPreferences(LYRICS_SETTINGS, Context.MODE_PRIVATE) - .getBoolean(FIRST_EMBEDDED_LYRICS, false) - val embeddedLyrics = arrayListOf() - val fileLyrics = arrayListOf() - embeddedLyrics.addAll(CaptionUtils.getEmbeddedLyrics(currentPlay.path, context)) - if (firstEmbeddedLyrics && embeddedLyrics.isNotEmpty()) { - lyricsType = LyricsType.TEXT - } else if (text.exists()) { - lyricsType = LyricsType.TEXT - fileLyrics.addAll( - readCaptions( - text.bufferedReader(), - LyricsType.TEXT, - context - ) + if(lyricsJob!=null&&lyricsJob?.isActive==true){ + lyricsJob?.cancel() + } + lyricsJob = CoroutineScope(Dispatchers.IO).launch { + val regexPattern = Regex("[<>\"/~'{}?,+=)(^&*%!@#\$]") + val artistsFolder = currentPlay.artist.replace( + regexPattern, + "_" ) - } else if (File("$path.lrc").exists()) { - lyricsType = LyricsType.LRC - fileLyrics.addAll( - readCaptions( - File("$path.lrc").bufferedReader(), - LyricsType.LRC, - context - ) + val folderPath = "$Lyrics/$artistsFolder" + val folder = context.getExternalFilesDir( + folderPath ) - } else if (File("$path.srt").exists()) { - lyricsType = LyricsType.SRT - fileLyrics.addAll( - readCaptions( - File("$path.srt").bufferedReader(), - LyricsType.SRT, - context + folder?.mkdirs() + val id = currentPlay.name.replace(regexPattern, "_") + val path = "${context.getExternalFilesDir(folderPath)?.absolutePath}/$id" + val text = File("$path.txt") + val firstEmbeddedLyrics = + context.getSharedPreferences(LYRICS_SETTINGS, Context.MODE_PRIVATE) + .getBoolean(FIRST_EMBEDDED_LYRICS, false) + val embeddedLyrics = arrayListOf() + val fileLyrics = arrayListOf() + embeddedLyrics.addAll(CaptionUtils.getEmbeddedLyrics(currentPlay.path, context)) + if (firstEmbeddedLyrics && embeddedLyrics.isNotEmpty()) { + lyricsType = LyricsType.TEXT + } else if (text.exists()) { + lyricsType = LyricsType.TEXT + fileLyrics.addAll( + readCaptions( + text.bufferedReader(), + LyricsType.TEXT, + context + ) ) - ) - } else if (File("$path.vtt").exists()) { - lyricsType = LyricsType.VTT - fileLyrics.addAll( - readCaptions( - File("$path.vtt").bufferedReader(), - LyricsType.VTT, - context + } else if (File("$path.lrc").exists()) { + lyricsType = LyricsType.LRC + fileLyrics.addAll( + readCaptions( + File("$path.lrc").bufferedReader(), + LyricsType.LRC, + context + ) ) - ) - } else { - CoroutineScope(Dispatchers.IO).launch { - getDb(context).StorageFolderDao().findAll()?.forEach { storageFolder -> - val treeUri = Uri.parse(storageFolder.uri) - if (treeUri != null) { - context.contentResolver.takePersistableUriPermission( - treeUri, - Intent.FLAG_GRANT_READ_URI_PERMISSION or Intent.FLAG_GRANT_WRITE_URI_PERMISSION - ) - val pickedDir = DocumentFile.fromTreeUri(context, treeUri) - val d = pickedDir?.listFiles() - d?.forEach { - if (it.isFile && it.canRead() - ) { - val musicName: String = try { - currentPlay.path.substring( - currentPlay.path.lastIndexOf("/") + 1, - currentPlay.path.lastIndexOf(".") - ) - } catch (e: Exception) { - "" - } - val 
fileNameWithSuffix = it.name?.lowercase() ?: "" - val fileName = try { - fileNameWithSuffix.substring( - 0, - fileNameWithSuffix.lastIndexOf(".") - ) - } catch (e: Exception) { - "" - } - if (fileName.trim().lowercase() == musicName.trim() - .lowercase() - ) { - if (fileNameWithSuffix.endsWith(".lrc")) { - fileLyrics.addAll(fileRead(it.uri, context, LyricsType.LRC)) - } else if (fileNameWithSuffix.endsWith(".srt")) { - fileLyrics.addAll(fileRead(it.uri, context, LyricsType.SRT)) - } else if (fileNameWithSuffix.endsWith(".vtt")) { - fileLyrics.addAll(fileRead(it.uri, context, LyricsType.VTT)) - } else if (fileNameWithSuffix.endsWith(".txt")) { - fileLyrics.addAll( - fileRead( - it.uri, - context, + } else if (File("$path.srt").exists()) { + lyricsType = LyricsType.SRT + fileLyrics.addAll( + readCaptions( + File("$path.srt").bufferedReader(), + LyricsType.SRT, + context + ) + ) + } else if (File("$path.vtt").exists()) { + lyricsType = LyricsType.VTT + fileLyrics.addAll( + readCaptions( + File("$path.vtt").bufferedReader(), + LyricsType.VTT, + context + ) + ) + } else { + val musicName: String = try { + currentPlay.path.substring( + currentPlay.path.lastIndexOf("/") + 1, + currentPlay.path.lastIndexOf(".") + ) + } catch (e: Exception) { + "" + } + val files = getDb(context).StorageFolderDao().findAll(); + if (files != null) { + outer@ for (storageFolder in files) { + val treeUri = Uri.parse(storageFolder.uri) + if (treeUri != null) { + context.contentResolver.takePersistableUriPermission( + treeUri, + Intent.FLAG_GRANT_READ_URI_PERMISSION or Intent.FLAG_GRANT_WRITE_URI_PERMISSION + ) + val pickedDir = DocumentFile.fromTreeUri(context, treeUri) + val d = pickedDir?.listFiles() + if (d != null) { + for (it in d) { + if (it.isFile && it.canRead() + ) { + val fileNameWithSuffix = + it.name?.lowercase() ?: "" + val type = + if (fileNameWithSuffix.endsWith(".lrc")) { + LyricsType.LRC + } else if (fileNameWithSuffix.endsWith(".srt")) { + LyricsType.SRT + } else if (fileNameWithSuffix.endsWith(".vtt")) { + LyricsType.VTT + } else if (fileNameWithSuffix.endsWith(".txt")) { LyricsType.TEXT + } else { + continue + } + val fileName = try { + fileNameWithSuffix.substring( + 0, + fileNameWithSuffix.indexOf(".") + ) + } catch (e: Exception) { + "" + } + if (fileName.trim() + .lowercase() == musicName.trim() + .lowercase() + ) { + fileLyrics.addAll( + fileRead( + it.uri, + context, + type + ) ) - ) + break@outer + } } } } } } + } + } + if (fileLyrics.isNotEmpty()) { + currentCaptionList.addAll(fileLyrics) + } else if (embeddedLyrics.isNotEmpty()) { + currentCaptionList.addAll(embeddedLyrics) + } + val duration = currentPlay.duration + // every lyrics line duration + itemDuration = + duration / if (currentCaptionList.size == 0) 1 else currentCaptionList.size } - if (fileLyrics.isNotEmpty()) { - currentCaptionList.addAll(fileLyrics) - } else if (embeddedLyrics.isNotEmpty()) { - currentCaptionList.addAll(embeddedLyrics) - } - val duration = currentPlay.duration - // every lyrics line duration - itemDuration = duration / if (currentCaptionList.size == 0) 1 else currentCaptionList.size } private fun fileRead( @@ -300,12 +320,15 @@ class MusicViewModel : ViewModel() { LyricsType.SRT -> { captions.addAll(CaptionUtils.parseSrtFile(bufferedReader)) } + LyricsType.VTT -> { captions.addAll(CaptionUtils.parseVttFile(bufferedReader)) } + LyricsType.LRC -> { captions.addAll(CaptionUtils.parseLrcFile(bufferedReader, context)) } + LyricsType.TEXT -> { captions.addAll(CaptionUtils.parseTextFile(bufferedReader, context)) } @@ 
-319,7 +342,6 @@ class MusicViewModel : ViewModel() { ) arrayList.add(an) } - return arrayList } diff --git a/app/src/main/java/com/ztftrue/music/MyApplication.kt b/app/src/main/java/com/ztftrue/music/MyApplication.kt new file mode 100644 index 0000000..f9a5416 --- /dev/null +++ b/app/src/main/java/com/ztftrue/music/MyApplication.kt @@ -0,0 +1,24 @@ +package com.ztftrue.music + +import android.app.Application +import android.content.Intent + + +class MyApplication : Application() { + override fun onCreate() { + super.onCreate() + Thread.setDefaultUncaughtExceptionHandler { _, e -> + handleUncaughtException(e) + } + } + + private fun handleUncaughtException(e: Throwable) { + + val intent = Intent(applicationContext, ErrorTipActivity::class.java) + intent.putExtra("error", e) + intent.addFlags(Intent.FLAG_ACTIVITY_CLEAR_TOP) + intent.addFlags(Intent.FLAG_ACTIVITY_CLEAR_TASK) + intent.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK) + startActivity(intent) + } +} \ No newline at end of file diff --git a/app/src/main/java/com/ztftrue/music/play/CreateNotification.kt b/app/src/main/java/com/ztftrue/music/play/CreateNotification.kt index 1ab67f6..0663628 100644 --- a/app/src/main/java/com/ztftrue/music/play/CreateNotification.kt +++ b/app/src/main/java/com/ztftrue/music/play/CreateNotification.kt @@ -183,8 +183,8 @@ class CreateNotification(service: Service, private val mediaSession: MediaSessio private fun createNotificationChannel(context: Context) { // Create the NotificationChannel, but only on API 26+ because // the NotificationChannel class is new and not in the support library - val name = "MonsterMusic" - val descriptionText = "PlayNotify" + val name = context.getString(R.string.app_name) + val descriptionText = context.getString(R.string.play_notify) val importance = NotificationManager.IMPORTANCE_LOW val channel = NotificationChannel(CHANNEL_ID, name, importance).apply { description = descriptionText diff --git a/app/src/main/java/com/ztftrue/music/play/PlayService.kt b/app/src/main/java/com/ztftrue/music/play/PlayService.kt index 8b37ef7..afe035c 100644 --- a/app/src/main/java/com/ztftrue/music/play/PlayService.kt +++ b/app/src/main/java/com/ztftrue/music/play/PlayService.kt @@ -35,6 +35,7 @@ import androidx.media3.exoplayer.audio.ForwardingAudioSink import androidx.media3.exoplayer.trackselection.AdaptiveTrackSelection import androidx.media3.exoplayer.trackselection.DefaultTrackSelector import com.ztftrue.music.MainActivity +import com.ztftrue.music.R import com.ztftrue.music.effects.EchoAudioProcessor import com.ztftrue.music.effects.EqualizerAudioProcessor import com.ztftrue.music.sqlData.MusicDatabase @@ -422,7 +423,8 @@ class PlayService : MediaBrowserServiceCompat() { if (musicItem != null && playList != null) { if (playList.type == PlayListType.Queue || (playList.type == playListCurrent?.type && playList.id == playListCurrent?.id - &&musicQueue.size==musicItems?.size)) { + && musicQueue.size == musicItems?.size) + ) { playMusicCurrentQueue(musicItem, index) } else { playMusicSwitchQueue(musicItem, playList, index, musicItems) @@ -1444,6 +1446,7 @@ class PlayService : MediaBrowserServiceCompat() { } var errorCount = 0 + var lastMediaIndex = -1 private fun playerAddListener() { exoPlayer.addListener(@UnstableApi object : Player.Listener { override fun onIsPlayingChanged(isPlaying: Boolean) { @@ -1484,13 +1487,13 @@ class PlayService : MediaBrowserServiceCompat() { if (errorCount > 3) { Toast.makeText( this@PlayService, - "Many times play error, Play paused", + 
getString(R.string.mutiple_error_tip), Toast.LENGTH_SHORT ).show() } else { Toast.makeText( this@PlayService, - "Play error, auto play next", + getString(R.string.play_error_play_next), Toast.LENGTH_SHORT ).show() exoPlayer.seekToNextMediaItem() @@ -1505,7 +1508,6 @@ class PlayService : MediaBrowserServiceCompat() { reason: Int ) { super.onPositionDiscontinuity(oldPosition, newPosition, reason) - // TODO seek bar also call this function if (musicQueue.isEmpty()) return currentPlayTrack = musicQueue[newPosition.mediaItemIndex] @@ -1514,6 +1516,21 @@ class PlayService : MediaBrowserServiceCompat() { needPlayPause = false timeFinish() } +// when (reason) { +// Player.DISCONTINUITY_REASON_SEEK -> { +// // Handle seek +// } +// +// Player.DISCONTINUITY_REASON_SEEK_ADJUSTMENT -> { +// +// } +// +// else -> { +// // Handle other reasons +// } +// } + + } override fun onMediaMetadataChanged(mediaMetadata: MediaMetadata) { diff --git a/app/src/main/java/com/ztftrue/music/ui/home/AlbumGridView.kt b/app/src/main/java/com/ztftrue/music/ui/home/AlbumGridView.kt index 5403a27..92ab4d0 100644 --- a/app/src/main/java/com/ztftrue/music/ui/home/AlbumGridView.kt +++ b/app/src/main/java/com/ztftrue/music/ui/home/AlbumGridView.kt @@ -31,7 +31,7 @@ import androidx.compose.foundation.shape.CircleShape import androidx.compose.foundation.shape.RoundedCornerShape import androidx.compose.material.icons.Icons import androidx.compose.material.icons.filled.MoreVert -import androidx.compose.material3.Divider +import androidx.compose.material3.HorizontalDivider import androidx.compose.material3.Icon import androidx.compose.material3.IconButton import androidx.compose.material3.MaterialTheme @@ -54,6 +54,7 @@ import androidx.compose.ui.graphics.ColorFilter import androidx.compose.ui.platform.LocalConfiguration import androidx.compose.ui.platform.LocalContext import androidx.compose.ui.res.painterResource +import androidx.compose.ui.res.stringResource import androidx.compose.ui.unit.dp import androidx.compose.ui.window.Dialog import androidx.compose.ui.window.DialogProperties @@ -134,7 +135,9 @@ fun AlbumGridView( LazyRow( contentPadding = PaddingValues(5.dp), state = rowListSate, - modifier = modifier.background(MaterialTheme.colorScheme.background).fillMaxSize() + modifier = modifier + .background(MaterialTheme.colorScheme.background) + .fillMaxSize() ) { items(albumList.size) { index -> val item = albumList[index] @@ -158,7 +161,8 @@ fun AlbumGridView( // ScrollDirectionType.GRID_VERTICAL LazyVerticalGrid( modifier = Modifier - .background(MaterialTheme.colorScheme.background).fillMaxSize(), + .background(MaterialTheme.colorScheme.background) + .fillMaxSize(), columns = GridCells.Fixed(musicViewModel.albumItemsCount.intValue), // Number of columns in the grid contentPadding = PaddingValues(5.dp), state = listState @@ -360,10 +364,10 @@ fun AlbumsOperateDialog( ) { Text( color = MaterialTheme.colorScheme.onBackground, - text = "Operate", modifier = Modifier + text = stringResource(id = R.string.operate), modifier = Modifier .padding(2.dp) ) - Divider( + HorizontalDivider( modifier = Modifier .fillMaxWidth() .height(1.dp) @@ -394,7 +398,8 @@ fun AlbumsOperateDialog( contentAlignment = Alignment.CenterStart ) { Text( - text = "Add to queue", Modifier.padding(start = 10.dp), + text = stringResource(id = R.string.add_to_queue), + modifier = Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground ) } @@ -418,8 +423,8 @@ fun AlbumsOperateDialog( contentAlignment = Alignment.CenterStart ) { Text( - text 
= "Play next", - Modifier.padding(start = 10.dp), + text = stringResource(id = R.string.play_next), + modifier = Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground ) } @@ -443,8 +448,8 @@ fun AlbumsOperateDialog( contentAlignment = Alignment.CenterStart ) { Text( - text = "Add to playlist", - Modifier.padding(start = 10.dp), + text = stringResource(id = R.string.add_to_playlist), + modifier = Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground ) } @@ -467,8 +472,8 @@ fun AlbumsOperateDialog( contentAlignment = Alignment.CenterStart ) { Text( - text = "Artist", - Modifier.padding(start = 10.dp), + text = stringResource(id = R.string.artist), + modifier = Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground ) } @@ -487,7 +492,7 @@ fun AlbumsOperateDialog( .padding(8.dp) .fillMaxWidth(), ) { - Text("Cancel", color = MaterialTheme.colorScheme.onBackground) + Text(stringResource(R.string.cancel), color = MaterialTheme.colorScheme.onBackground) } } } diff --git a/app/src/main/java/com/ztftrue/music/ui/home/ArtistsGridView.kt b/app/src/main/java/com/ztftrue/music/ui/home/ArtistsGridView.kt index 8402026..0294ba0 100644 --- a/app/src/main/java/com/ztftrue/music/ui/home/ArtistsGridView.kt +++ b/app/src/main/java/com/ztftrue/music/ui/home/ArtistsGridView.kt @@ -31,7 +31,7 @@ import androidx.compose.foundation.shape.CircleShape import androidx.compose.foundation.shape.RoundedCornerShape import androidx.compose.material.icons.Icons import androidx.compose.material.icons.filled.MoreVert -import androidx.compose.material3.Divider +import androidx.compose.material3.HorizontalDivider import androidx.compose.material3.Icon import androidx.compose.material3.IconButton import androidx.compose.material3.MaterialTheme @@ -54,6 +54,7 @@ import androidx.compose.ui.graphics.ColorFilter import androidx.compose.ui.platform.LocalConfiguration import androidx.compose.ui.platform.LocalContext import androidx.compose.ui.res.painterResource +import androidx.compose.ui.res.stringResource import androidx.compose.ui.unit.dp import androidx.compose.ui.window.Dialog import androidx.compose.ui.window.DialogProperties @@ -360,11 +361,11 @@ fun ArtistsOperateDialog( horizontalAlignment = Alignment.CenterHorizontally, ) { Text( - text = "Operate", modifier = Modifier + text = stringResource(id = R.string.operate), modifier = Modifier .padding(2.dp), color = MaterialTheme.colorScheme.onBackground ) - Divider( + HorizontalDivider( modifier = Modifier .fillMaxWidth() .height(1.dp) @@ -395,8 +396,8 @@ fun ArtistsOperateDialog( contentAlignment = Alignment.CenterStart ) { Text( - text = "Add to queue", - Modifier.padding(start = 10.dp), + text = stringResource(id = R.string.add_to_queue), + modifier = Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground ) } @@ -420,7 +421,7 @@ fun ArtistsOperateDialog( contentAlignment = Alignment.CenterStart ) { Text( - text = "Play next", + text = stringResource(id = R.string.play_next), Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground ) @@ -445,7 +446,7 @@ fun ArtistsOperateDialog( contentAlignment = Alignment.CenterStart ) { Text( - text = "Add to playlist", + text = stringResource(id = R.string.add_to_playlist), Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground ) @@ -465,7 +466,7 @@ fun ArtistsOperateDialog( .padding(8.dp) .fillMaxWidth(), ) { - Text("Cancel", color = MaterialTheme.colorScheme.onBackground) + 
Text(stringResource(R.string.cancel), color = MaterialTheme.colorScheme.onBackground) } } } diff --git a/app/src/main/java/com/ztftrue/music/ui/home/FolderListView.kt b/app/src/main/java/com/ztftrue/music/ui/home/FolderListView.kt index 74a7ada..07b2ffd 100644 --- a/app/src/main/java/com/ztftrue/music/ui/home/FolderListView.kt +++ b/app/src/main/java/com/ztftrue/music/ui/home/FolderListView.kt @@ -24,7 +24,7 @@ import androidx.compose.foundation.rememberScrollState import androidx.compose.foundation.shape.CircleShape import androidx.compose.material.icons.Icons import androidx.compose.material.icons.filled.MoreVert -import androidx.compose.material3.Divider +import androidx.compose.material3.HorizontalDivider import androidx.compose.material3.Icon import androidx.compose.material3.IconButton import androidx.compose.material3.MaterialTheme @@ -45,6 +45,7 @@ import androidx.compose.ui.geometry.Offset import androidx.compose.ui.graphics.ColorFilter import androidx.compose.ui.platform.LocalContext import androidx.compose.ui.res.painterResource +import androidx.compose.ui.res.stringResource import androidx.compose.ui.unit.dp import androidx.compose.ui.window.Dialog import androidx.compose.ui.window.DialogProperties @@ -102,7 +103,7 @@ fun FolderListView( Column { Text( - text = "Warning: Not all tracks can be show in other tabs, they don't show that in Ringtones or Notifications folders", + text = stringResource(R.string.warning_for_tracks_tab), modifier = Modifier.padding(10.dp), color = MaterialTheme.colorScheme.onBackground ) @@ -119,7 +120,7 @@ fun FolderListView( .fillMaxWidth(), navController ) - Divider(color = MaterialTheme.colorScheme.inverseOnSurface, thickness = 1.2.dp) + HorizontalDivider(color = MaterialTheme.colorScheme.inverseOnSurface, thickness = 1.2.dp) } } } @@ -268,11 +269,11 @@ fun FolderListOperateDialog( horizontalAlignment = Alignment.CenterHorizontally, ) { Text( - text = "Operate", modifier = Modifier + text = stringResource(R.string.operate), modifier = Modifier .padding(2.dp), color = MaterialTheme.colorScheme.onBackground ) - Divider( + HorizontalDivider( modifier = Modifier .fillMaxWidth() .height(1.dp) @@ -303,8 +304,8 @@ fun FolderListOperateDialog( contentAlignment = Alignment.CenterStart ) { Text( - text = "Add to queue", - Modifier.padding(start = 10.dp), + text = stringResource(R.string.add_to_queue), + modifier = Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground ) @@ -329,7 +330,7 @@ fun FolderListOperateDialog( contentAlignment = Alignment.CenterStart ) { Text( - text = "Play next", + text = stringResource(R.string.play_next), Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground ) @@ -355,7 +356,7 @@ fun FolderListOperateDialog( contentAlignment = Alignment.CenterStart ) { Text( - text = "Add to playlist", + text = stringResource(R.string.add_to_playlist), Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground ) @@ -375,7 +376,7 @@ fun FolderListOperateDialog( .padding(8.dp) .fillMaxWidth(), ) { - Text("Cancel", color = MaterialTheme.colorScheme.onBackground) + Text(stringResource(R.string.cancel), color = MaterialTheme.colorScheme.onBackground) } } } diff --git a/app/src/main/java/com/ztftrue/music/ui/home/GenreGridView.kt b/app/src/main/java/com/ztftrue/music/ui/home/GenreGridView.kt index 0b79f35..d11db52 100644 --- a/app/src/main/java/com/ztftrue/music/ui/home/GenreGridView.kt +++ b/app/src/main/java/com/ztftrue/music/ui/home/GenreGridView.kt @@ -31,7 +31,7 @@ import 
androidx.compose.foundation.shape.CircleShape import androidx.compose.foundation.shape.RoundedCornerShape import androidx.compose.material.icons.Icons import androidx.compose.material.icons.filled.MoreVert -import androidx.compose.material3.Divider +import androidx.compose.material3.HorizontalDivider import androidx.compose.material3.Icon import androidx.compose.material3.IconButton import androidx.compose.material3.MaterialTheme @@ -53,6 +53,7 @@ import androidx.compose.ui.graphics.ColorFilter import androidx.compose.ui.platform.LocalConfiguration import androidx.compose.ui.platform.LocalContext import androidx.compose.ui.res.painterResource +import androidx.compose.ui.res.stringResource import androidx.compose.ui.unit.dp import androidx.compose.ui.window.Dialog import androidx.compose.ui.window.DialogProperties @@ -126,7 +127,9 @@ fun GenreGridView( LazyRow( contentPadding = PaddingValues(5.dp), state = rowListSate, - modifier = modifier.background(MaterialTheme.colorScheme.background).fillMaxSize() + modifier = modifier + .background(MaterialTheme.colorScheme.background) + .fillMaxSize() ) { items(genreList.size) { index -> val item = genreList[index] @@ -152,7 +155,9 @@ fun GenreGridView( columns = GridCells.Fixed(musicViewModel.genreItemsCount.intValue), // Number of columns in the grid contentPadding = PaddingValues(5.dp), state = listState, - modifier = modifier.background(MaterialTheme.colorScheme.background).fillMaxSize() + modifier = modifier + .background(MaterialTheme.colorScheme.background) + .fillMaxSize() ) { items(genreList.size) { index -> val item = genreList[index] @@ -284,7 +289,7 @@ fun GenreItemView( horizontalArrangement = Arrangement.SpaceBetween ) { Text( - text = "$number song${if (number <= 1L) "" else "s"}", + text = stringResource(R.string.song, number), color = MaterialTheme.colorScheme.onSurfaceVariant, ) IconButton( @@ -338,11 +343,11 @@ fun GenreListOperateDialog( horizontalAlignment = Alignment.CenterHorizontally, ) { Text( - text = "Operate", modifier = Modifier + text = stringResource(id = R.string.operate), modifier = Modifier .padding(2.dp), color = MaterialTheme.colorScheme.onBackground ) - Divider( + HorizontalDivider( modifier = Modifier .fillMaxWidth() .height(1.dp) @@ -373,8 +378,8 @@ fun GenreListOperateDialog( contentAlignment = Alignment.CenterStart ) { Text( - text = "Add to queue", - Modifier.padding(start = 10.dp), + text = stringResource(id = R.string.add_to_queue), + modifier = Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground ) } @@ -398,7 +403,7 @@ fun GenreListOperateDialog( contentAlignment = Alignment.CenterStart ) { Text( - text = "Play next", + text = stringResource(id = R.string.play_next), Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground ) @@ -423,7 +428,7 @@ fun GenreListOperateDialog( contentAlignment = Alignment.CenterStart ) { Text( - text = "Add to playlist", + text = stringResource(id = R.string.add_to_playlist), Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground ) @@ -443,7 +448,7 @@ fun GenreListOperateDialog( .padding(8.dp) .fillMaxWidth(), ) { - Text("Cancel", color = MaterialTheme.colorScheme.onBackground) + Text(stringResource(R.string.cancel), color = MaterialTheme.colorScheme.onBackground) } } } diff --git a/app/src/main/java/com/ztftrue/music/ui/home/MainView.kt b/app/src/main/java/com/ztftrue/music/ui/home/MainView.kt index 886c32c..821e5ac 100644 --- a/app/src/main/java/com/ztftrue/music/ui/home/MainView.kt +++ 
b/app/src/main/java/com/ztftrue/music/ui/home/MainView.kt @@ -50,6 +50,7 @@ import androidx.compose.ui.draw.clip import androidx.compose.ui.graphics.ColorFilter import androidx.compose.ui.platform.LocalContext import androidx.compose.ui.res.painterResource +import androidx.compose.ui.res.stringResource import androidx.compose.ui.semantics.contentDescription import androidx.compose.ui.semantics.semantics import androidx.compose.ui.unit.dp @@ -74,6 +75,7 @@ import com.ztftrue.music.ui.public.SleepTimeDialog import com.ztftrue.music.ui.public.TracksListView import com.ztftrue.music.utils.OperateType import com.ztftrue.music.utils.PlayListType +import com.ztftrue.music.utils.Utils import com.ztftrue.music.utils.trackManager.PlaylistManager import kotlinx.coroutines.launch @@ -342,7 +344,7 @@ fun MainTopBar( ACTION_PlayLIST_CHANGE, null, null ) } else { - Toast.makeText(context, "创建失败", Toast.LENGTH_SHORT) + Toast.makeText(context, context.getString(R.string.create_failed), Toast.LENGTH_SHORT) .show() } } @@ -354,7 +356,7 @@ fun MainTopBar( }) { Icon( imageVector = Icons.Default.MoreVert, - contentDescription = "Operate", + contentDescription =stringResource(id = R.string.operate), modifier = Modifier .size(20.dp) .clip(CircleShape), @@ -400,7 +402,7 @@ fun MainTopBar( modifier = Modifier.fillMaxWidth(), indicator = { tabPositions -> if (tabPositions.isNotEmpty()) { - TabRowDefaults.Indicator( + TabRowDefaults.SecondaryIndicator( Modifier .height(3.0.dp) .tabIndicatorOffset(tabPositions[pagerState.currentPage]), @@ -408,7 +410,7 @@ fun MainTopBar( color = MaterialTheme.colorScheme.onBackground ) } else { - TabRowDefaults.Indicator( + TabRowDefaults.SecondaryIndicator( Modifier.height(3.0.dp), height = 3.0.dp, color = MaterialTheme.colorScheme.onBackground @@ -426,7 +428,7 @@ fun MainTopBar( }, text = { Text( - text = item.name, + text =stringResource(id = Utils.translateMap[item.name] ?:R.string.app_name) , color = MaterialTheme.colorScheme.onBackground, fontSize = 14.sp, ) diff --git a/app/src/main/java/com/ztftrue/music/ui/home/PlayListView.kt b/app/src/main/java/com/ztftrue/music/ui/home/PlayListView.kt index 9863da8..461e0bb 100644 --- a/app/src/main/java/com/ztftrue/music/ui/home/PlayListView.kt +++ b/app/src/main/java/com/ztftrue/music/ui/home/PlayListView.kt @@ -24,7 +24,7 @@ import androidx.compose.foundation.rememberScrollState import androidx.compose.foundation.shape.CircleShape import androidx.compose.material.icons.Icons import androidx.compose.material.icons.filled.MoreVert -import androidx.compose.material3.Divider +import androidx.compose.material3.HorizontalDivider import androidx.compose.material3.Icon import androidx.compose.material3.IconButton import androidx.compose.material3.MaterialTheme @@ -46,6 +46,7 @@ import androidx.compose.ui.geometry.Offset import androidx.compose.ui.graphics.ColorFilter import androidx.compose.ui.platform.LocalContext import androidx.compose.ui.res.painterResource +import androidx.compose.ui.res.stringResource import androidx.compose.ui.unit.dp import androidx.compose.ui.window.Dialog import androidx.compose.ui.window.DialogProperties @@ -117,7 +118,7 @@ fun PlayListView( PlayListType.PlayLists, playList, ) - Divider(color = MaterialTheme.colorScheme.inverseOnSurface, thickness = 1.2.dp) + HorizontalDivider(color = MaterialTheme.colorScheme.inverseOnSurface, thickness = 1.2.dp) } } } @@ -340,11 +341,11 @@ fun PlayListOperateDialog( horizontalAlignment = Alignment.CenterHorizontally, ) { Text( - text = "Operate", modifier = Modifier + text = 
stringResource(id = R.string.operate), modifier = Modifier .padding(2.dp), color = MaterialTheme.colorScheme.onBackground ) - Divider( + HorizontalDivider( modifier = Modifier .fillMaxWidth() .height(1.dp) @@ -375,7 +376,7 @@ fun PlayListOperateDialog( contentAlignment = Alignment.CenterStart ) { Text( - text = "Add to queue", + text = stringResource(id = R.string.add_to_queue), Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground ) @@ -400,7 +401,7 @@ fun PlayListOperateDialog( contentAlignment = Alignment.CenterStart ) { Text( - text = "Play next", + text =stringResource(id = R.string.play_next), Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground ) @@ -425,7 +426,7 @@ fun PlayListOperateDialog( contentAlignment = Alignment.CenterStart ) { Text( - text = "Add to playlist", + text = stringResource(id = R.string.add_to_playlist), Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground ) @@ -449,8 +450,8 @@ fun PlayListOperateDialog( contentAlignment = Alignment.CenterStart ) { Text( - text = "Rename playlist", - Modifier.padding(start = 10.dp), + text = stringResource(R.string.rename_playlist), + modifier = Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground ) } @@ -473,8 +474,8 @@ fun PlayListOperateDialog( contentAlignment = Alignment.CenterStart ) { Text( - text = "Delete playlist", - Modifier.padding(start = 10.dp), + text = stringResource(R.string.delete_playlist), + modifier = Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground ) } @@ -493,7 +494,7 @@ fun PlayListOperateDialog( .padding(8.dp) .fillMaxWidth(), ) { - Text("Cancel", color = MaterialTheme.colorScheme.onBackground) + Text(stringResource(R.string.cancel), color = MaterialTheme.colorScheme.onBackground) } } } diff --git a/app/src/main/java/com/ztftrue/music/ui/other/DrawMenu.kt b/app/src/main/java/com/ztftrue/music/ui/other/DrawMenu.kt index 96d5c95..0c12bec 100644 --- a/app/src/main/java/com/ztftrue/music/ui/other/DrawMenu.kt +++ b/app/src/main/java/com/ztftrue/music/ui/other/DrawMenu.kt @@ -16,8 +16,8 @@ import androidx.compose.foundation.layout.width import androidx.compose.foundation.lazy.LazyColumn import androidx.compose.foundation.pager.PagerState import androidx.compose.foundation.shape.CircleShape -import androidx.compose.material3.Divider import androidx.compose.material3.DrawerState +import androidx.compose.material3.HorizontalDivider import androidx.compose.material3.MaterialTheme import androidx.compose.material3.ModalDrawerSheet import androidx.compose.material3.Text @@ -36,6 +36,7 @@ import androidx.compose.ui.geometry.Offset import androidx.compose.ui.layout.ContentScale import androidx.compose.ui.platform.LocalContext import androidx.compose.ui.res.painterResource +import androidx.compose.ui.res.stringResource import androidx.compose.ui.unit.dp import androidx.compose.ui.window.Dialog import androidx.compose.ui.window.DialogProperties @@ -154,7 +155,7 @@ fun DrawMenu( contentAlignment = Alignment.CenterStart ) { Text( - text = "Settings", + text = stringResource(R.string.settings), Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground, ) @@ -186,7 +187,7 @@ fun DrawMenu( contentAlignment = Alignment.CenterStart ) { Text( - text = "FeedBack", + text = stringResource(R.string.feedback), Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground, ) @@ -215,7 +216,7 @@ fun DrawMenu( contentAlignment = Alignment.CenterStart ) { Text( 
- text = "Exit APP", Modifier.padding(start = 10.dp), + text = stringResource(R.string.exit_app), Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground, ) } @@ -250,12 +251,12 @@ fun FeedBackDialog(onDismiss: () -> Unit) { horizontalAlignment = Alignment.CenterHorizontally, ) { Text( - text = "About&Thanks", modifier = Modifier + text = stringResource(R.string.about_thanks), modifier = Modifier .padding(2.dp), color = MaterialTheme.colorScheme.onBackground ) - Divider( + HorizontalDivider( modifier = Modifier .fillMaxWidth() .height(1.dp) @@ -288,7 +289,7 @@ fun FeedBackDialog(onDismiss: () -> Unit) { contentAlignment = Alignment.CenterStart ) { Text( - text = "To github", + text = stringResource(R.string.to_github), Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground ) @@ -317,7 +318,7 @@ fun FeedBackDialog(onDismiss: () -> Unit) { contentAlignment = Alignment.CenterStart ) { Text( - text = "Send email", Modifier.padding(start = 10.dp), + text = stringResource(R.string.send_email), Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground ) } @@ -336,7 +337,7 @@ fun FeedBackDialog(onDismiss: () -> Unit) { .padding(8.dp) .fillMaxWidth(), ) { - Text("Cancel", color = MaterialTheme.colorScheme.onBackground) + Text(stringResource(R.string.cancel), color = MaterialTheme.colorScheme.onBackground) } } } diff --git a/app/src/main/java/com/ztftrue/music/ui/other/EditTrackPage.kt b/app/src/main/java/com/ztftrue/music/ui/other/EditTrackPage.kt index ceffcfd..894d13c 100644 --- a/app/src/main/java/com/ztftrue/music/ui/other/EditTrackPage.kt +++ b/app/src/main/java/com/ztftrue/music/ui/other/EditTrackPage.kt @@ -35,6 +35,7 @@ import androidx.compose.ui.graphics.Color import androidx.compose.ui.platform.LocalConfiguration import androidx.compose.ui.platform.LocalContext import androidx.compose.ui.platform.LocalSoftwareKeyboardController +import androidx.compose.ui.res.stringResource import androidx.compose.ui.text.TextStyle import androidx.compose.ui.text.input.ImeAction import androidx.compose.ui.text.input.KeyboardType @@ -43,6 +44,7 @@ import androidx.media3.common.util.UnstableApi import androidx.navigation.NavHostController import coil.compose.rememberAsyncImagePainter import com.ztftrue.music.MusicViewModel +import com.ztftrue.music.R import com.ztftrue.music.ui.public.BackButton import com.ztftrue.music.utils.trackManager.TracksManager @@ -158,7 +160,7 @@ fun EditTrackPage( } }, label = { - Text("Title") + Text(stringResource(id = R.string.title)) }, // Placeholder or hint text keyboardOptions = KeyboardOptions.Default.copy( imeAction = ImeAction.Next, @@ -190,7 +192,7 @@ fun EditTrackPage( } }, label = { - Text("Album") + Text(stringResource(R.string.album)) }, // Placeholder or hint text keyboardOptions = KeyboardOptions.Default.copy( imeAction = ImeAction.Next, @@ -222,7 +224,7 @@ fun EditTrackPage( } }, label = { - Text("Artist") + Text(stringResource(id = R.string.artist, "")) }, // Placeholder or hint text keyboardOptions = KeyboardOptions.Default.copy( imeAction = ImeAction.Next, @@ -254,7 +256,7 @@ fun EditTrackPage( } }, label = { - Text("Genre") + Text(stringResource(R.string.genre)) }, // Placeholder or hint text keyboardOptions = KeyboardOptions.Default.copy( imeAction = ImeAction.Next, @@ -286,7 +288,7 @@ fun EditTrackPage( } }, label = { - Text("Year") + Text(stringResource(R.string.year)) }, // Placeholder or hint text keyboardOptions = KeyboardOptions.Default.copy( imeAction = ImeAction.Next, diff --git 
a/app/src/main/java/com/ztftrue/music/ui/other/SearchPage.kt b/app/src/main/java/com/ztftrue/music/ui/other/SearchPage.kt index 043aa61..1d5dbbc 100644 --- a/app/src/main/java/com/ztftrue/music/ui/other/SearchPage.kt +++ b/app/src/main/java/com/ztftrue/music/ui/other/SearchPage.kt @@ -16,7 +16,7 @@ import androidx.compose.foundation.lazy.LazyColumn import androidx.compose.foundation.rememberScrollState import androidx.compose.foundation.text.KeyboardActions import androidx.compose.foundation.text.KeyboardOptions -import androidx.compose.material3.Divider +import androidx.compose.material3.HorizontalDivider import androidx.compose.material3.LocalContentColor import androidx.compose.material3.MaterialTheme import androidx.compose.material3.OutlinedTextField @@ -33,12 +33,12 @@ import androidx.compose.runtime.mutableStateOf import androidx.compose.runtime.remember import androidx.compose.runtime.setValue import androidx.compose.ui.Alignment -import androidx.compose.ui.ExperimentalComposeUiApi import androidx.compose.ui.Modifier import androidx.compose.ui.focus.FocusRequester import androidx.compose.ui.focus.focusRequester import androidx.compose.ui.platform.LocalConfiguration import androidx.compose.ui.platform.LocalSoftwareKeyboardController +import androidx.compose.ui.res.stringResource import androidx.compose.ui.text.TextStyle import androidx.compose.ui.text.input.ImeAction import androidx.compose.ui.text.input.KeyboardType @@ -46,17 +46,18 @@ import androidx.compose.ui.unit.dp import androidx.media3.common.util.UnstableApi import androidx.navigation.NavHostController import com.ztftrue.music.MusicViewModel +import com.ztftrue.music.R import com.ztftrue.music.play.ACTION_SEARCH import com.ztftrue.music.sqlData.model.MusicItem import com.ztftrue.music.ui.home.AlbumGridView import com.ztftrue.music.ui.home.ArtistsGridView import com.ztftrue.music.ui.public.BackButton import com.ztftrue.music.ui.public.TracksListView +import com.ztftrue.music.utils.PlayListType +import com.ztftrue.music.utils.ScrollDirectionType import com.ztftrue.music.utils.model.AlbumList import com.ztftrue.music.utils.model.AnyListBase import com.ztftrue.music.utils.model.ArtistList -import com.ztftrue.music.utils.PlayListType -import com.ztftrue.music.utils.ScrollDirectionType import kotlinx.coroutines.CoroutineScope import kotlinx.coroutines.Dispatchers import kotlinx.coroutines.Job @@ -67,7 +68,6 @@ import kotlinx.coroutines.launch /** * show all music of playlist */ -@OptIn(ExperimentalComposeUiApi::class) @UnstableApi @Composable fun SearchPage( @@ -174,7 +174,7 @@ fun SearchPage( } }, placeholder = { - Text("Enter text to search") + Text(stringResource(R.string.enter_text_to_search)) }, // Placeholder or hint text keyboardOptions = KeyboardOptions.Default.copy( imeAction = ImeAction.Done, @@ -220,13 +220,13 @@ fun SearchPage( val configuration = LocalConfiguration.current if (keywords.isNotEmpty() && albumsList.isEmpty() && artistList.isEmpty() && tracksList.isEmpty()) { Text( - text = "No result", + text = stringResource(R.string.no_music), color = MaterialTheme.colorScheme.onBackground, modifier = Modifier.horizontalScroll(rememberScrollState(0)) ) } if (albumsList.isNotEmpty()) { - Text(text = "Album") + Text(text =stringResource(R.string.album)) Box( modifier = Modifier .height((configuration.screenWidthDp / musicViewModel.albumItemsCount.intValue + 40).dp) @@ -241,7 +241,7 @@ fun SearchPage( ) } - Divider( + HorizontalDivider( modifier = Modifier .fillMaxWidth() .height(1.dp) @@ -249,7 +249,7 @@ fun SearchPage( 
) } if (artistList.isNotEmpty()) { - Text(text = "Artist") + Text(text = stringResource(R.string.artist)) Box( modifier = Modifier .height((configuration.screenWidthDp / musicViewModel.albumItemsCount.intValue + 40).dp) @@ -263,7 +263,7 @@ fun SearchPage( scrollDirection = ScrollDirectionType.GRID_HORIZONTAL ) } - Divider( + HorizontalDivider( modifier = Modifier .fillMaxWidth() .height(1.dp) diff --git a/app/src/main/java/com/ztftrue/music/ui/other/SettingsPage.kt b/app/src/main/java/com/ztftrue/music/ui/other/SettingsPage.kt index 9cc0923..8e5fb4e 100644 --- a/app/src/main/java/com/ztftrue/music/ui/other/SettingsPage.kt +++ b/app/src/main/java/com/ztftrue/music/ui/other/SettingsPage.kt @@ -17,10 +17,10 @@ import androidx.compose.foundation.layout.width import androidx.compose.foundation.lazy.LazyColumn import androidx.compose.foundation.shape.CircleShape import androidx.compose.material3.Checkbox -import androidx.compose.material3.Divider import androidx.compose.material3.DropdownMenu import androidx.compose.material3.DropdownMenuItem import androidx.compose.material3.FilledIconButton +import androidx.compose.material3.HorizontalDivider import androidx.compose.material3.MaterialTheme import androidx.compose.material3.Scaffold import androidx.compose.material3.Switch @@ -44,6 +44,7 @@ import androidx.compose.ui.graphics.ColorFilter import androidx.compose.ui.platform.LocalContext import androidx.compose.ui.platform.LocalDensity import androidx.compose.ui.res.painterResource +import androidx.compose.ui.res.stringResource import androidx.compose.ui.semantics.contentDescription import androidx.compose.ui.semantics.semantics import androidx.compose.ui.unit.DpOffset @@ -93,8 +94,8 @@ fun SettingsPage( modifier = Modifier.padding(all = 0.dp), topBar = { Column { - BackTopBar(navController) - Divider( + BackTopBar(navController, stringResource(id = R.string.settings)) + HorizontalDivider( modifier = Modifier .fillMaxWidth() .height(1.dp) @@ -141,7 +142,7 @@ fun SettingsPage( }) } Text( - text = "Manage tab items", + text = stringResource(R.string.manage_tab_items), Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground ) @@ -166,7 +167,7 @@ fun SettingsPage( ) { Row { Text( - text = "Prefer embedded lyrics", + text = stringResource(R.string.prefer_embedded_lyrics), Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground ) @@ -208,7 +209,7 @@ fun SettingsPage( } } Text( - text = "About", + text = stringResource(R.string.about), Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground ) @@ -238,7 +239,7 @@ fun SettingsPage( val selectedText = if (selectedIndex >= 0) { Utils.items[selectedIndex] } else { - "" + R.string.app_name } Row( modifier = Modifier.fillMaxWidth(), @@ -246,12 +247,12 @@ fun SettingsPage( horizontalArrangement = Arrangement.SpaceBetween ) { Text( - text = "Select theme:", + text = stringResource(R.string.select_theme), Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground ) Text( - text = selectedText, + text = stringResource(id = selectedText), Modifier.padding(end = 10.dp), color = MaterialTheme.colorScheme.onBackground ) @@ -274,7 +275,7 @@ fun SettingsPage( DropdownMenuItem( text = { Text( - item, + stringResource(id = item), color = MaterialTheme.colorScheme.onTertiaryContainer ) }, @@ -348,12 +349,12 @@ fun ManageTabDialog(musicViewModel: MusicViewModel, onDismiss: () -> Unit) { horizontalAlignment = Alignment.CenterHorizontally, ) { Text( - text = "ManageTab", modifier = 
Modifier + text = stringResource(R.string.manage_tab_items), modifier = Modifier .padding(2.dp), color = MaterialTheme.colorScheme.onBackground ) - Divider( + HorizontalDivider( modifier = Modifier .fillMaxWidth() .height(1.dp) @@ -407,7 +408,7 @@ fun ManageTabDialog(musicViewModel: MusicViewModel, onDismiss: () -> Unit) { .clip(CircleShape), ) } - Divider( + HorizontalDivider( color = Color.Transparent, modifier = Modifier .height(40.dp) @@ -440,7 +441,9 @@ fun ManageTabDialog(musicViewModel: MusicViewModel, onDismiss: () -> Unit) { } } Text( - text = item.name, + text = stringResource( + id = Utils.translateMap[item.name] ?: R.string.app_name + ), color = MaterialTheme.colorScheme.onBackground ) } @@ -478,9 +481,12 @@ fun ManageTabDialog(musicViewModel: MusicViewModel, onDismiss: () -> Unit) { .padding(8.dp) .fillMaxWidth(0.5f), ) { - Text("Cancel", color = MaterialTheme.colorScheme.onBackground) + Text( + stringResource(R.string.cancel), + color = MaterialTheme.colorScheme.onBackground + ) } - Divider( + HorizontalDivider( modifier = Modifier .background(MaterialTheme.colorScheme.onBackground) .width(1.dp) @@ -495,7 +501,10 @@ fun ManageTabDialog(musicViewModel: MusicViewModel, onDismiss: () -> Unit) { .fillMaxWidth(), ) { - Text("Ok", color = MaterialTheme.colorScheme.onBackground) + Text( + stringResource(id = R.string.confirm), + color = MaterialTheme.colorScheme.onBackground + ) } } } @@ -532,12 +541,12 @@ fun AboutDialog(onDismiss: () -> Unit) { horizontalAlignment = Alignment.CenterHorizontally, ) { Text( - text = "About&Thanks", modifier = Modifier + text = stringResource(id = R.string.about_thanks), modifier = Modifier .padding(2.dp), color = MaterialTheme.colorScheme.onBackground ) - Divider( + HorizontalDivider( modifier = Modifier .fillMaxWidth() .height(1.dp) @@ -566,8 +575,8 @@ fun AboutDialog(onDismiss: () -> Unit) { contentAlignment = Alignment.CenterStart ) { Text( - text = "SourceCode", - Modifier.padding(start = 10.dp), + text = stringResource(R.string.sourcecode), + modifier = Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground ) } @@ -687,7 +696,10 @@ fun AboutDialog(onDismiss: () -> Unit) { .padding(8.dp) .fillMaxWidth(), ) { - Text("Ok", color = MaterialTheme.colorScheme.onBackground) + Text( + stringResource(id = R.string.confirm), + color = MaterialTheme.colorScheme.onBackground + ) } } } diff --git a/app/src/main/java/com/ztftrue/music/ui/other/TracksSelectPage.kt b/app/src/main/java/com/ztftrue/music/ui/other/TracksSelectPage.kt index 5422d42..845d67b 100644 --- a/app/src/main/java/com/ztftrue/music/ui/other/TracksSelectPage.kt +++ b/app/src/main/java/com/ztftrue/music/ui/other/TracksSelectPage.kt @@ -16,7 +16,7 @@ import androidx.compose.foundation.layout.size import androidx.compose.foundation.shape.CircleShape import androidx.compose.material.icons.Icons import androidx.compose.material.icons.filled.Done -import androidx.compose.material3.Divider +import androidx.compose.material3.HorizontalDivider import androidx.compose.material3.Icon import androidx.compose.material3.IconButton import androidx.compose.material3.MaterialTheme @@ -41,8 +41,8 @@ import com.ztftrue.music.play.ACTION_PlayLIST_CHANGE import com.ztftrue.music.sqlData.model.MusicItem import com.ztftrue.music.ui.public.BackButton import com.ztftrue.music.ui.public.TracksListView -import com.ztftrue.music.utils.model.AnyListBase import com.ztftrue.music.utils.PlayListType +import com.ztftrue.music.utils.model.AnyListBase import 
com.ztftrue.music.utils.trackManager.PlaylistManager @@ -139,7 +139,8 @@ fun TracksSelectPage( navController.popBackStack() } else { - Toast.makeText(context, "创建失败", Toast.LENGTH_SHORT) + Toast.makeText(context, + context.getString(R.string.create_failed), Toast.LENGTH_SHORT) .show() } } @@ -178,7 +179,7 @@ fun TracksSelectPage( .fillMaxSize() .padding(it) ) { - Divider( + HorizontalDivider( modifier = Modifier .fillMaxWidth() .height(1.dp) diff --git a/app/src/main/java/com/ztftrue/music/ui/play/CoverView.kt b/app/src/main/java/com/ztftrue/music/ui/play/CoverView.kt index 2239ad2..095d11e 100644 --- a/app/src/main/java/com/ztftrue/music/ui/play/CoverView.kt +++ b/app/src/main/java/com/ztftrue/music/ui/play/CoverView.kt @@ -26,6 +26,7 @@ import androidx.compose.runtime.remember import androidx.compose.runtime.setValue import androidx.compose.ui.Modifier import androidx.compose.ui.graphics.Color +import androidx.compose.ui.res.stringResource import androidx.compose.ui.unit.dp import coil.compose.rememberAsyncImagePainter import com.ztftrue.music.MusicViewModel @@ -52,7 +53,7 @@ fun CoverView(musicViewModel: MusicViewModel) { Image( painter = rememberAsyncImagePainter( paint ?: R.drawable.songs_thumbnail_cover - ), contentDescription = "Cover", + ), contentDescription = stringResource(R.string.cover), modifier = Modifier .fillMaxSize() .aspectRatio(1f) @@ -79,7 +80,7 @@ fun CoverView(musicViewModel: MusicViewModel) { fontSize = MaterialTheme.typography.titleSmall.fontSize ) Text( - text = "artist: ${it1.artist}", modifier = + text = stringResource(R.string.artist, it1.artist), modifier = Modifier .padding(0.dp) .height(30.dp) @@ -89,7 +90,7 @@ fun CoverView(musicViewModel: MusicViewModel) { fontSize = MaterialTheme.typography.titleSmall.fontSize ) Text( - text = "album: ${it1.album}", modifier = + text = stringResource(R.string.album, it1.album), modifier = Modifier .padding(top = 10.dp) .horizontalScroll(rememberScrollState(0)) diff --git a/app/src/main/java/com/ztftrue/music/ui/play/EqualizerView.kt b/app/src/main/java/com/ztftrue/music/ui/play/EqualizerView.kt index 800e08c..0aca28a 100644 --- a/app/src/main/java/com/ztftrue/music/ui/play/EqualizerView.kt +++ b/app/src/main/java/com/ztftrue/music/ui/play/EqualizerView.kt @@ -35,11 +35,13 @@ import androidx.compose.ui.graphics.TransformOrigin import androidx.compose.ui.graphics.graphicsLayer import androidx.compose.ui.layout.layout import androidx.compose.ui.platform.LocalContext +import androidx.compose.ui.res.stringResource import androidx.compose.ui.semantics.contentDescription import androidx.compose.ui.semantics.semantics import androidx.compose.ui.unit.Constraints import androidx.compose.ui.unit.dp import com.ztftrue.music.MusicViewModel +import com.ztftrue.music.R import com.ztftrue.music.play.ACTION_CHANGE_PITCH import com.ztftrue.music.play.ACTION_DSP_BAND import com.ztftrue.music.play.ACTION_DSP_BAND_FLATTEN @@ -96,7 +98,7 @@ fun EqualizerView(musicViewModel: MusicViewModel) { modifier = Modifier.fillMaxWidth() ) { Text( - text = "Pitch" + (pitch.floatValue).toString(), + text = stringResource(R.string.pitch) + (pitch.floatValue).toString(), color = MaterialTheme.colorScheme.onBackground ) OutlinedButton( @@ -113,7 +115,7 @@ fun EqualizerView(musicViewModel: MusicViewModel) { ) }, ) { - Text(text = "Reset", color = MaterialTheme.colorScheme.onBackground) + Text(text = stringResource(R.string.reset), color = MaterialTheme.colorScheme.onBackground) } } Slider( @@ -142,7 +144,7 @@ fun EqualizerView(musicViewModel: MusicViewModel) { 
modifier = Modifier.fillMaxWidth() ) { Text( - text = "Speed" + (speed.floatValue).toString(), + text = stringResource(R.string.speed) + (speed.floatValue).toString(), color = MaterialTheme.colorScheme.onBackground ) OutlinedButton( @@ -154,7 +156,7 @@ fun EqualizerView(musicViewModel: MusicViewModel) { ) }, ) { - Text(text = "Reset", color = MaterialTheme.colorScheme.onBackground) + Text(text = stringResource(R.string.reset), color = MaterialTheme.colorScheme.onBackground) } } Slider( @@ -187,7 +189,7 @@ fun EqualizerView(musicViewModel: MusicViewModel) { horizontalArrangement = Arrangement.SpaceBetween ) { Row(verticalAlignment = Alignment.CenterVertically) { - Text(text = "Echo", color = MaterialTheme.colorScheme.onBackground) + Text(text = stringResource(R.string.echo), color = MaterialTheme.colorScheme.onBackground) Box( modifier = Modifier .width(4.dp) @@ -235,7 +237,9 @@ fun EqualizerView(musicViewModel: MusicViewModel) { } } Text( - text = "Delay: " + (delayTime.floatValue).toString() + "seconds", + text = stringResource(R.string.delay)+(delayTime.floatValue)+stringResource( + R.string.seconds + ), color = MaterialTheme.colorScheme.onBackground ) Slider( @@ -260,7 +264,7 @@ fun EqualizerView(musicViewModel: MusicViewModel) { }, ) Text( - text = "Decay: " + (decay.floatValue).toString(), + text = stringResource(R.string.decay) + (decay.floatValue).toString(), color = MaterialTheme.colorScheme.onBackground ) Slider( @@ -305,7 +309,7 @@ fun EqualizerView(musicViewModel: MusicViewModel) { horizontalArrangement = Arrangement.SpaceBetween ) { Row(verticalAlignment = Alignment.CenterVertically) { - Text(text = "Equalizer", color = MaterialTheme.colorScheme.onBackground) + Text(text = stringResource(R.string.equalizer), color = MaterialTheme.colorScheme.onBackground) Box( modifier = Modifier .width(10.dp) @@ -357,7 +361,7 @@ fun EqualizerView(musicViewModel: MusicViewModel) { } else { Toast.makeText( context, - "flatten failed", + context.getString(R.string.flatten_failed), Toast.LENGTH_SHORT ).show() } @@ -367,7 +371,7 @@ fun EqualizerView(musicViewModel: MusicViewModel) { ) }, ) { - Text(text = "Flatten", color = MaterialTheme.colorScheme.onBackground) + Text(text = stringResource(R.string.flatten), color = MaterialTheme.colorScheme.onBackground) } } } diff --git a/app/src/main/java/com/ztftrue/music/ui/play/LyricsView.kt b/app/src/main/java/com/ztftrue/music/ui/play/LyricsView.kt index 47c27fe..75df692 100644 --- a/app/src/main/java/com/ztftrue/music/ui/play/LyricsView.kt +++ b/app/src/main/java/com/ztftrue/music/ui/play/LyricsView.kt @@ -49,6 +49,7 @@ import androidx.compose.ui.platform.LocalFocusManager import androidx.compose.ui.platform.LocalTextToolbar import androidx.compose.ui.platform.LocalView import androidx.compose.ui.platform.TextToolbarStatus +import androidx.compose.ui.res.stringResource import androidx.compose.ui.text.SpanStyle import androidx.compose.ui.text.TextStyle import androidx.compose.ui.text.buildAnnotatedString @@ -66,6 +67,7 @@ import androidx.compose.ui.window.PopupProperties import androidx.media3.common.util.UnstableApi import com.ztftrue.music.MainActivity import com.ztftrue.music.MusicViewModel +import com.ztftrue.music.R import com.ztftrue.music.utils.LyricsType import com.ztftrue.music.utils.Utils import com.ztftrue.music.utils.model.ListStringCaption @@ -261,7 +263,7 @@ fun LyricsView( if (musicViewModel.currentCaptionList.size == 0) { Column { Text( - text = "No Lyrics, Click to import lyrics.\n Support LRC/VTT/SRT/TXT", + text = 
stringResource(R.string.no_lyrics_import_tip), color = MaterialTheme.colorScheme.onBackground, fontSize = MaterialTheme.typography.titleLarge.fontSize, textAlign = TextAlign.Center, @@ -302,7 +304,7 @@ fun LyricsView( } ) Text( - text = "Or, Click to set lyrics folder", + text = stringResource(R.string.no_lyrics_set_folder), color = MaterialTheme.colorScheme.onBackground, fontSize = MaterialTheme.typography.titleLarge.fontSize, textAlign = TextAlign.Center, diff --git a/app/src/main/java/com/ztftrue/music/ui/play/PlayingPage.kt b/app/src/main/java/com/ztftrue/music/ui/play/PlayingPage.kt index f285a54..e6b8342 100644 --- a/app/src/main/java/com/ztftrue/music/ui/play/PlayingPage.kt +++ b/app/src/main/java/com/ztftrue/music/ui/play/PlayingPage.kt @@ -82,6 +82,7 @@ import androidx.compose.ui.input.pointer.pointerInput import androidx.compose.ui.platform.LocalConfiguration import androidx.compose.ui.platform.LocalContext import androidx.compose.ui.res.painterResource +import androidx.compose.ui.res.stringResource import androidx.compose.ui.semantics.contentDescription import androidx.compose.ui.semantics.semantics import androidx.compose.ui.text.style.TextAlign @@ -610,7 +611,7 @@ fun PlayingPage( .height(50.dp) ) { Text( - text = "Scroll", + text = stringResource(R.string.scroll), modifier = Modifier.padding(0.dp), color = MaterialTheme.colorScheme.onBackground, fontSize = TextUnit( @@ -660,7 +661,7 @@ fun PlayingPage( .height(50.dp) ) { Text( - text = "Highlight", + text = stringResource(R.string.highlight), modifier = Modifier.padding(0.dp), color = MaterialTheme.colorScheme.onBackground, fontSize = TextUnit( @@ -758,7 +759,7 @@ fun PlayingPage( if (list.isEmpty()) { Toast.makeText( context, - "Dictionary app is not installed or your dictionary app do not support this feature.", + stringResource(R.string.no_dictionary_app_tip), Toast.LENGTH_SHORT ).show() popupWindowDictionary = false @@ -781,7 +782,7 @@ fun PlayingPage( horizontalAlignment = Alignment.CenterHorizontally, ) { Text( - text = "Manage Dictionary App", modifier = Modifier + text = stringResource(R.string.manage_dictionary_app), modifier = Modifier .padding(2.dp), color = MaterialTheme.colorScheme.onBackground ) @@ -808,12 +809,12 @@ fun PlayingPage( Box(modifier = Modifier.width(80.dp)) { } } Text( - text = "Show", + text = stringResource(R.string.show), modifier = Modifier.width(50.dp), color = MaterialTheme.colorScheme.onBackground ) Text( - text = "Auto go", + text = stringResource(R.string.auto_go), modifier = Modifier.width(80.dp), color = MaterialTheme.colorScheme.onBackground ) @@ -856,14 +857,6 @@ fun PlayingPage( position = list.size - 1 } if (position != listIndex) { - Log.i( - "POSITION", - "p=$listIndex ,${position} position=${ - offset / 60.dp.toPx( - context - ) - }" - ) list.remove(item) list.add(position, item) } @@ -967,7 +960,7 @@ fun PlayingPage( .fillMaxWidth(0.5f), ) { Text( - "Cancel", + stringResource(id = R.string.cancel), color = MaterialTheme.colorScheme.onBackground ) } @@ -1114,7 +1107,7 @@ fun PlayingPage( } }, text = { Text( - text = item.name, + text = stringResource(id = Utils.translateMap[item.name] ?:R.string.app_name), color = MaterialTheme.colorScheme.onBackground, fontSize = 14.sp, ) @@ -1218,7 +1211,7 @@ fun PlayingPage( } else { if (viewModel.currentPlay.value != null) { Text( - text = "Get duration failed", + text = stringResource(R.string.get_duration_failed), color = MaterialTheme.colorScheme.onBackground ) } diff --git a/app/src/main/java/com/ztftrue/music/ui/public/AddPlayList.kt 
b/app/src/main/java/com/ztftrue/music/ui/public/AddPlayList.kt index b70d30c..bba0e29 100644 --- a/app/src/main/java/com/ztftrue/music/ui/public/AddPlayList.kt +++ b/app/src/main/java/com/ztftrue/music/ui/public/AddPlayList.kt @@ -21,7 +21,7 @@ import androidx.compose.foundation.text.KeyboardActions import androidx.compose.foundation.text.KeyboardOptions import androidx.compose.material.icons.Icons import androidx.compose.material.icons.filled.Add -import androidx.compose.material3.Divider +import androidx.compose.material3.HorizontalDivider import androidx.compose.material3.Icon import androidx.compose.material3.MaterialTheme import androidx.compose.material3.ProvideTextStyle @@ -40,6 +40,7 @@ import androidx.compose.ui.Modifier import androidx.compose.ui.draw.clip import androidx.compose.ui.draw.drawBehind import androidx.compose.ui.geometry.Offset +import androidx.compose.ui.res.stringResource import androidx.compose.ui.text.TextStyle import androidx.compose.ui.text.input.ImeAction import androidx.compose.ui.text.input.KeyboardType @@ -47,9 +48,10 @@ import androidx.compose.ui.unit.dp import androidx.compose.ui.window.Dialog import androidx.compose.ui.window.DialogProperties import com.ztftrue.music.MusicViewModel +import com.ztftrue.music.R import com.ztftrue.music.sqlData.model.MusicItem -import com.ztftrue.music.utils.model.MusicPlayList import com.ztftrue.music.utils.PlayListType +import com.ztftrue.music.utils.model.MusicPlayList @Composable fun CreatePlayListDialog( @@ -81,12 +83,12 @@ fun CreatePlayListDialog( horizontalAlignment = Alignment.CenterHorizontally, ) { Text( - text = "Create PlayList", + text = stringResource(R.string.create_playlist), modifier = Modifier .padding(2.dp), color = MaterialTheme.colorScheme.onBackground, ) - Divider( + HorizontalDivider( modifier = Modifier .fillMaxWidth() .height(1.dp) @@ -101,7 +103,7 @@ fun CreatePlayListDialog( }, label = { Text( - "Enter name", + text = stringResource(id = R.string.enter_name), color = MaterialTheme.colorScheme.onBackground ) }, @@ -154,12 +156,12 @@ fun CreatePlayListDialog( contentAlignment = Alignment.CenterStart ) { Text( - text = "Cancel", - Modifier.padding(start = 10.dp), + text = stringResource(R.string.cancel), + modifier = Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground, ) } - Divider( + HorizontalDivider( modifier = Modifier .width(1.dp) .fillMaxWidth(0.1f) @@ -185,8 +187,8 @@ fun CreatePlayListDialog( contentAlignment = Alignment.CenterStart ) { Text( - text = "Ok", - Modifier.padding(start = 10.dp), + text = stringResource(R.string.confirm), + modifier = Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground, ) } @@ -228,12 +230,12 @@ fun RenamePlayListDialog( horizontalAlignment = Alignment.CenterHorizontally, ) { Text( - text = "Rename PlayList name", + text = stringResource(R.string.rename_playlist), modifier = Modifier .padding(2.dp), color = MaterialTheme.colorScheme.onBackground, ) - Divider( + HorizontalDivider( modifier = Modifier .fillMaxWidth() .height(1.dp) @@ -246,7 +248,7 @@ fun RenamePlayListDialog( onValueChange = { playListName = it }, - label = { Text("Enter name") }, + label = { Text(stringResource(R.string.enter_name)) }, keyboardOptions = KeyboardOptions.Default.copy( imeAction = ImeAction.Done, keyboardType = KeyboardType.Text @@ -288,12 +290,12 @@ fun RenamePlayListDialog( contentAlignment = Alignment.CenterStart ) { Text( - text = "Cancel", + text =stringResource(R.string.cancel), Modifier.padding(start = 10.dp), color = 
MaterialTheme.colorScheme.onBackground, ) } - Divider( + HorizontalDivider( modifier = Modifier .width(1.dp) .fillMaxWidth(0.1f) @@ -319,7 +321,7 @@ fun RenamePlayListDialog( contentAlignment = Alignment.CenterStart ) { Text( - text = "Ok", + text = stringResource(R.string.confirm), Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground, ) @@ -361,12 +363,12 @@ fun DeleteTip( horizontalAlignment = Alignment.CenterHorizontally, ) { Text( - text = "Confirm to delete $titleTip?", + text = stringResource(R.string.confirm_to_delete, titleTip), modifier = Modifier .padding(2.dp), color = MaterialTheme.colorScheme.onBackground, ) - Divider( + HorizontalDivider( modifier = Modifier .fillMaxWidth() .height(1.dp) @@ -395,11 +397,11 @@ fun DeleteTip( contentAlignment = Alignment.CenterStart ) { Text( - text = "Cancel", Modifier.padding(start = 10.dp), + text =stringResource(R.string.cancel), Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground, ) } - Divider( + HorizontalDivider( modifier = Modifier .width(1.dp) .fillMaxWidth(0.1f) @@ -425,7 +427,7 @@ fun DeleteTip( contentAlignment = Alignment.CenterStart ) { Text( - text = "Confirm", + text = stringResource(R.string.confirm), Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground, ) @@ -498,11 +500,11 @@ fun AddMusicToPlayListDialog( horizontalAlignment = Alignment.CenterHorizontally, ) { Text( - text = "Add ${musicItem?.name} Music To PlayList", modifier = Modifier + text = stringResource(R.string.add_music_to_playlist, musicItem?.name?:""), modifier = Modifier .padding(2.dp), color = MaterialTheme.colorScheme.onBackground ) - Divider( + HorizontalDivider( modifier = Modifier .fillMaxWidth() .height(1.dp) @@ -524,7 +526,7 @@ fun AddMusicToPlayListDialog( .clip(CircleShape), ) Text( - text = "Add New PlayList", + text = stringResource(R.string.add_new_playlist), color = MaterialTheme.colorScheme.onBackground, modifier = Modifier.horizontalScroll(rememberScrollState(0)) ) @@ -551,7 +553,7 @@ fun AddMusicToPlayListDialog( ) } } - Divider( + HorizontalDivider( color = MaterialTheme.colorScheme.inverseOnSurface, thickness = 1.2.dp ) @@ -575,7 +577,7 @@ fun AddMusicToPlayListDialog( }, contentAlignment = Alignment.CenterStart ) { - Text(text = "Cancel", Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground) + Text(text = stringResource(R.string.cancel), Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground) } } } diff --git a/app/src/main/java/com/ztftrue/music/ui/public/BackButton.kt b/app/src/main/java/com/ztftrue/music/ui/public/BackButton.kt index 5098011..ca6990e 100644 --- a/app/src/main/java/com/ztftrue/music/ui/public/BackButton.kt +++ b/app/src/main/java/com/ztftrue/music/ui/public/BackButton.kt @@ -1,7 +1,7 @@ package com.ztftrue.music.ui.public import androidx.compose.material.icons.Icons -import androidx.compose.material.icons.filled.ArrowBack +import androidx.compose.material.icons.automirrored.filled.ArrowBack import androidx.compose.material3.Icon import androidx.compose.material3.IconButton import androidx.compose.material3.MaterialTheme @@ -20,14 +20,14 @@ fun BackButton( navigateUp() } } - // CompositioinLocal + // Composition Local // val LocalNavigationProvider = staticCompositionLocalOf { ... 
} // LocalNavigationProvider provides navController // setValue // val navController = LocalNavigationProvider.current // useValue IconButton(onClick = { navController.navigateBack { } }) { - Icon(Icons.Filled.ArrowBack, contentDescription = "Back", tint = MaterialTheme.colorScheme.onBackground) + Icon(Icons.AutoMirrored.Filled.ArrowBack, contentDescription = "Back", tint = MaterialTheme.colorScheme.onBackground) } } diff --git a/app/src/main/java/com/ztftrue/music/ui/public/MusicItemView.kt b/app/src/main/java/com/ztftrue/music/ui/public/MusicItemView.kt index e815307..79c6c77 100644 --- a/app/src/main/java/com/ztftrue/music/ui/public/MusicItemView.kt +++ b/app/src/main/java/com/ztftrue/music/ui/public/MusicItemView.kt @@ -25,7 +25,7 @@ import androidx.compose.foundation.shape.CircleShape import androidx.compose.material.icons.Icons import androidx.compose.material.icons.filled.MoreVert import androidx.compose.material3.Checkbox -import androidx.compose.material3.Divider +import androidx.compose.material3.HorizontalDivider import androidx.compose.material3.Icon import androidx.compose.material3.IconButton import androidx.compose.material3.MaterialTheme @@ -45,6 +45,7 @@ import androidx.compose.ui.geometry.Offset import androidx.compose.ui.graphics.ColorFilter import androidx.compose.ui.platform.LocalContext import androidx.compose.ui.res.painterResource +import androidx.compose.ui.res.stringResource import androidx.compose.ui.unit.dp import androidx.compose.ui.window.Dialog import androidx.compose.ui.window.DialogProperties @@ -414,10 +415,10 @@ fun OperateDialog( ) { Text( color = MaterialTheme.colorScheme.onBackground, - text = "Operate ${music.name}", modifier = Modifier + text = stringResource(R.string.operate_music, music.name), modifier = Modifier .padding(2.dp) ) - Divider( + HorizontalDivider( modifier = Modifier .fillMaxWidth() .height(1.dp) @@ -446,8 +447,8 @@ fun OperateDialog( contentAlignment = Alignment.CenterStart ) { Text( - text = "Remove from queue", - Modifier.padding(start = 10.dp), + text = stringResource(R.string.remove_from_queue), + modifier = Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground ) } @@ -472,7 +473,7 @@ fun OperateDialog( contentAlignment = Alignment.CenterStart ) { Text( - text = "Add to queue", + text = stringResource(id = R.string.add_to_queue), modifier = Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground, ) @@ -497,8 +498,8 @@ fun OperateDialog( contentAlignment = Alignment.CenterStart ) { Text( - text = "Play next", - Modifier.padding(start = 10.dp), + text = stringResource(id = R.string.play_next), + modifier = Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground ) } @@ -522,8 +523,8 @@ fun OperateDialog( contentAlignment = Alignment.CenterStart ) { Text( - text = "Add to playlist", - Modifier.padding(start = 10.dp), + text = stringResource(R.string.add_to_playlist), + modifier = Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground ) } @@ -547,8 +548,8 @@ fun OperateDialog( contentAlignment = Alignment.CenterStart ) { Text( - text = "Remove from current playlist", - Modifier.padding(start = 10.dp), + text = stringResource(R.string.remove_from_current_playlist), + modifier = Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground ) } @@ -596,7 +597,7 @@ fun OperateDialog( contentAlignment = Alignment.CenterStart ) { Text( - text = "Artist", Modifier.padding(start = 10.dp), + text = stringResource(id = 
R.string.artist,music.artist), modifier = Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground ) } @@ -619,7 +620,7 @@ fun OperateDialog( contentAlignment = Alignment.CenterStart ) { Text( - text = "Album", Modifier.padding(start = 10.dp), + text = stringResource(id = R.string.album,music.album), modifier = Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground ) } @@ -642,7 +643,7 @@ fun OperateDialog( contentAlignment = Alignment.CenterStart ) { Text( - text = "Music info", + text = stringResource(R.string.music_info), Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground ) @@ -666,7 +667,7 @@ fun OperateDialog( contentAlignment = Alignment.CenterStart ) { Text( - text = "Delete from storage", + text = stringResource(R.string.delete_from_storage), Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground ) @@ -686,7 +687,7 @@ fun OperateDialog( .padding(8.dp) .fillMaxWidth(), ) { - Text("Cancel", color = MaterialTheme.colorScheme.onBackground) + Text(stringResource(R.string.cancel), color = MaterialTheme.colorScheme.onBackground) } } } diff --git a/app/src/main/java/com/ztftrue/music/ui/public/QueuePage.kt b/app/src/main/java/com/ztftrue/music/ui/public/QueuePage.kt index 560015e..8e8617c 100644 --- a/app/src/main/java/com/ztftrue/music/ui/public/QueuePage.kt +++ b/app/src/main/java/com/ztftrue/music/ui/public/QueuePage.kt @@ -17,7 +17,7 @@ import androidx.compose.foundation.lazy.LazyColumn import androidx.compose.foundation.shape.CircleShape import androidx.compose.material.icons.Icons import androidx.compose.material.icons.filled.MoreVert -import androidx.compose.material3.Divider +import androidx.compose.material3.HorizontalDivider import androidx.compose.material3.Icon import androidx.compose.material3.IconButton import androidx.compose.material3.MaterialTheme @@ -35,6 +35,7 @@ import androidx.compose.ui.draw.clip import androidx.compose.ui.draw.drawBehind import androidx.compose.ui.geometry.Offset import androidx.compose.ui.platform.LocalContext +import androidx.compose.ui.res.stringResource import androidx.compose.ui.unit.dp import androidx.compose.ui.window.Dialog import androidx.compose.ui.window.DialogProperties @@ -42,6 +43,7 @@ import androidx.media3.common.util.UnstableApi import androidx.navigation.NavHostController import com.ztftrue.music.MusicViewModel import com.ztftrue.music.QueuePlayList +import com.ztftrue.music.R import com.ztftrue.music.play.ACTION_CLEAR_QUEUE import com.ztftrue.music.play.ACTION_PlayLIST_CHANGE import com.ztftrue.music.utils.OperateType @@ -113,7 +115,7 @@ fun QueuePage( ACTION_PlayLIST_CHANGE, null, null ) } else { - Toast.makeText(context, "创建失败", Toast.LENGTH_SHORT) + Toast.makeText(context, context.getString(R.string.create_failed), Toast.LENGTH_SHORT) .show() } } @@ -181,10 +183,10 @@ fun QueueOperateDialog( ) { Text( color = MaterialTheme.colorScheme.onBackground, - text = "Operate current queue", modifier = Modifier + text = stringResource(R.string.operate_current_queue), modifier = Modifier .padding(2.dp) ) - Divider( + HorizontalDivider( modifier = Modifier .fillMaxWidth() .height(1.dp) @@ -211,7 +213,7 @@ fun QueueOperateDialog( contentAlignment = Alignment.CenterStart ) { Text( - text = "Save current queue to playlist", + text = stringResource(R.string.save_current_queue_to_playlist), Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground ) @@ -237,7 +239,7 @@ fun QueueOperateDialog( contentAlignment = 
Alignment.CenterStart ) { Text( - text = "Clear current queue", + text = stringResource(R.string.clear_current_queue), Modifier.padding(start = 10.dp), color = MaterialTheme.colorScheme.onBackground ) @@ -259,7 +261,7 @@ fun QueueOperateDialog( .padding(8.dp) .fillMaxWidth(), ) { - Text("Cancel", color = MaterialTheme.colorScheme.onBackground) + Text(stringResource(R.string.cancel), color = MaterialTheme.colorScheme.onBackground) } } } diff --git a/app/src/main/java/com/ztftrue/music/ui/public/TopBar.kt b/app/src/main/java/com/ztftrue/music/ui/public/TopBar.kt index 507ba10..987f240 100644 --- a/app/src/main/java/com/ztftrue/music/ui/public/TopBar.kt +++ b/app/src/main/java/com/ztftrue/music/ui/public/TopBar.kt @@ -22,9 +22,9 @@ import androidx.compose.foundation.text.KeyboardOptions import androidx.compose.material.icons.Icons import androidx.compose.material.icons.filled.Search import androidx.compose.material3.Checkbox -import androidx.compose.material3.Divider import androidx.compose.material3.ElevatedButton import androidx.compose.material3.ExperimentalMaterial3Api +import androidx.compose.material3.HorizontalDivider import androidx.compose.material3.Icon import androidx.compose.material3.IconButton import androidx.compose.material3.MaterialTheme @@ -42,12 +42,15 @@ import androidx.compose.ui.Alignment import androidx.compose.ui.Modifier import androidx.compose.ui.draw.clip import androidx.compose.ui.graphics.ColorFilter +import androidx.compose.ui.platform.LocalContext import androidx.compose.ui.res.painterResource +import androidx.compose.ui.res.stringResource import androidx.compose.ui.semantics.contentDescription import androidx.compose.ui.semantics.semantics import androidx.compose.ui.text.TextStyle import androidx.compose.ui.text.input.ImeAction import androidx.compose.ui.text.input.KeyboardType +import androidx.compose.ui.text.intl.Locale import androidx.compose.ui.text.style.TextAlign import androidx.compose.ui.unit.dp import androidx.compose.ui.window.Dialog @@ -67,6 +70,7 @@ fun TopBar( musicViewModel: MusicViewModel, content: @Composable RowScope.() -> Unit ) { + var context = LocalContext.current var showDialog by remember { mutableStateOf(false) } val timerIcon: Int = if (musicViewModel.remainTime.longValue == 0L) { R.drawable.set_timer @@ -100,7 +104,7 @@ fun TopBar( modifier = Modifier .size(50.dp) .semantics { - contentDescription = "Search" + contentDescription = context.getString(R.string.search) }, onClick = { navController.navigate( @@ -145,12 +149,12 @@ fun SleepTimeDialog(musicViewModel: MusicViewModel, onDismiss: () -> Unit) { horizontalAlignment = Alignment.CenterHorizontally, ) { Text( - text = "Sleep timer", modifier = Modifier + text = stringResource(R.string.sleep_timer), modifier = Modifier .padding(2.dp), color = MaterialTheme.colorScheme.onBackground ) - Divider( + HorizontalDivider( modifier = Modifier .fillMaxWidth() .height(1.dp) @@ -166,17 +170,23 @@ fun SleepTimeDialog(musicViewModel: MusicViewModel, onDismiss: () -> Unit) { verticalAlignment = Alignment.CenterVertically ) { Text( - text = "Running: ${Utils.formatTime(musicViewModel.remainTime.longValue)}", + text = stringResource( + R.string.running, + Utils.formatTime(musicViewModel.remainTime.longValue) + ), color = MaterialTheme.colorScheme.onBackground ) TextButton( onClick = { onConfirmation(0L) }, modifier = Modifier.padding(8.dp), ) { - Text("Stop", color = MaterialTheme.colorScheme.onBackground) + Text( + text = stringResource(R.string.stop), + color = MaterialTheme.colorScheme.onBackground 
+ ) } } - Divider( + HorizontalDivider( modifier = Modifier .fillMaxWidth() .height(1.dp) @@ -194,12 +204,12 @@ fun SleepTimeDialog(musicViewModel: MusicViewModel, onDismiss: () -> Unit) { musicViewModel.playCompleted.value = it }) Text( - text = "Play completed last song", + text = stringResource(R.string.play_completed_last_song), color = MaterialTheme.colorScheme.onBackground ) } - Divider( + HorizontalDivider( modifier = Modifier .fillMaxWidth() .height(1.dp) @@ -221,7 +231,7 @@ fun SleepTimeDialog(musicViewModel: MusicViewModel, onDismiss: () -> Unit) { modifier = Modifier ) { Text( - text = "${sleepT[item]}\n minutes", + text = stringResource(R.string.minutes, sleepT[item]), textAlign = TextAlign.Center, color = MaterialTheme.colorScheme.onBackground ) @@ -245,7 +255,7 @@ fun SleepTimeDialog(musicViewModel: MusicViewModel, onDismiss: () -> Unit) { }, label = { Text( - "Enter minutes", + text = stringResource(R.string.enter_minutes), color = MaterialTheme.colorScheme.onBackground ) }, @@ -262,7 +272,10 @@ fun SleepTimeDialog(musicViewModel: MusicViewModel, onDismiss: () -> Unit) { .fillMaxWidth() .padding(bottom = 16.dp), suffix = { - Text("minutes", color = MaterialTheme.colorScheme.onBackground) + Text( + text = stringResource(id = R.string.minutes), + color = MaterialTheme.colorScheme.onBackground + ) }, ) } @@ -277,7 +290,10 @@ fun SleepTimeDialog(musicViewModel: MusicViewModel, onDismiss: () -> Unit) { onClick = { onDismiss() }, modifier = Modifier.padding(8.dp), ) { - Text("Dismiss", color = MaterialTheme.colorScheme.onBackground) + Text( + stringResource(id = R.string.cancel), + color = MaterialTheme.colorScheme.onBackground + ) } TextButton( onClick = { @@ -287,7 +303,10 @@ fun SleepTimeDialog(musicViewModel: MusicViewModel, onDismiss: () -> Unit) { }, modifier = Modifier.padding(8.dp), ) { - Text("Confirm", color = MaterialTheme.colorScheme.onBackground) + Text( + stringResource(id = R.string.confirm), + color = MaterialTheme.colorScheme.onBackground + ) } } @@ -301,11 +320,12 @@ fun SleepTimeDialog(musicViewModel: MusicViewModel, onDismiss: () -> Unit) { @OptIn(ExperimentalMaterial3Api::class) @Composable fun BackTopBar( - navController: NavHostController + navController: NavHostController, + text: String ) { TopAppBar( navigationIcon = { BackButton(navController) }, title = { - Text(text = "Settings", color = MaterialTheme.colorScheme.onBackground) + Text(text = text, color = MaterialTheme.colorScheme.onBackground) }) } \ No newline at end of file diff --git a/app/src/main/java/com/ztftrue/music/ui/public/TracksListView.kt b/app/src/main/java/com/ztftrue/music/ui/public/TracksListView.kt index edeedc2..41f8dde 100644 --- a/app/src/main/java/com/ztftrue/music/ui/public/TracksListView.kt +++ b/app/src/main/java/com/ztftrue/music/ui/public/TracksListView.kt @@ -11,7 +11,7 @@ import androidx.compose.foundation.lazy.LazyColumn import androidx.compose.foundation.lazy.rememberLazyListState import androidx.compose.foundation.rememberScrollState import androidx.compose.foundation.shape.CircleShape -import androidx.compose.material3.Divider +import androidx.compose.material3.HorizontalDivider import androidx.compose.material3.MaterialTheme import androidx.compose.material3.SmallFloatingActionButton import androidx.compose.material3.Text @@ -22,6 +22,7 @@ import androidx.compose.runtime.snapshots.SnapshotStateList import androidx.compose.ui.Modifier import androidx.compose.ui.graphics.ColorFilter import androidx.compose.ui.res.painterResource +import 
androidx.compose.ui.res.stringResource import androidx.compose.ui.semantics.contentDescription import androidx.compose.ui.semantics.semantics import androidx.compose.ui.unit.dp @@ -54,11 +55,13 @@ fun TracksListView( key(tracksList) { if (tracksList.size == 0) { Text( - text = "No music", + text = stringResource(R.string.no_music), color = MaterialTheme.colorScheme.onBackground, - modifier = Modifier.horizontalScroll(rememberScrollState(0)).semantics { - contentDescription = "No music" - } + modifier = Modifier + .horizontalScroll(rememberScrollState(0)) + .semantics { + contentDescription = "No music" + } ) }else{ ConstraintLayout( @@ -88,7 +91,7 @@ fun TracksListView( selectStatus, selectList, ) - Divider( + HorizontalDivider( color = MaterialTheme.colorScheme.inverseOnSurface, thickness = 1.2.dp ) diff --git a/app/src/main/java/com/ztftrue/music/ui/public/TracksPage.kt b/app/src/main/java/com/ztftrue/music/ui/public/TracksPage.kt index a227fe9..edd2e71 100644 --- a/app/src/main/java/com/ztftrue/music/ui/public/TracksPage.kt +++ b/app/src/main/java/com/ztftrue/music/ui/public/TracksPage.kt @@ -19,7 +19,7 @@ import androidx.compose.foundation.shape.CircleShape import androidx.compose.material.icons.Icons import androidx.compose.material.icons.filled.Add import androidx.compose.material.icons.filled.MoreVert -import androidx.compose.material3.Divider +import androidx.compose.material3.HorizontalDivider import androidx.compose.material3.Icon import androidx.compose.material3.IconButton import androidx.compose.material3.MaterialTheme @@ -431,7 +431,7 @@ fun TracksListPage( albumListDefault = albumsList, scrollDirection = ScrollDirectionType.GRID_HORIZONTAL ) - Divider( + HorizontalDivider( modifier = Modifier .fillMaxWidth() .height(1.dp) diff --git a/app/src/main/java/com/ztftrue/music/ui/theme/Theme.kt b/app/src/main/java/com/ztftrue/music/ui/theme/Theme.kt index a17c898..9cf293d 100644 --- a/app/src/main/java/com/ztftrue/music/ui/theme/Theme.kt +++ b/app/src/main/java/com/ztftrue/music/ui/theme/Theme.kt @@ -98,7 +98,7 @@ fun MusicPitchTheme( musicViewModel: MusicViewModel, darkTheme: Boolean = isSystemInDarkTheme(), // Dynamic color is available on Android 12+ - dynamicColor: Boolean = false, + dynamicColor: Boolean = true, content: @Composable () -> Unit ) { val context = LocalContext.current @@ -110,17 +110,14 @@ fun MusicPitchTheme( musicViewModel.themeSelected.intValue, musicViewModel.currentPlay.value ) { - // "Follow System", "Light", "Dark", "Follow Music Cover" + // "Follow System", "Light", "Dark", "Follow Music Cover","material you" if (musicViewModel.themeSelected.intValue == 0) { - colorScheme.value = - if (dynamicColor && Build.VERSION.SDK_INT >= Build.VERSION_CODES.S) if (darkTheme) dynamicDarkColorScheme( - context - ) else dynamicLightColorScheme(context) else if (darkTheme) DarkColorScheme else LightColorScheme + colorScheme.value = if (darkTheme) DarkColorScheme else LightColorScheme } else if (musicViewModel.themeSelected.intValue == 1) { colorScheme.value = LightColorScheme } else if (musicViewModel.themeSelected.intValue == 2) { colorScheme.value = DarkColorScheme - } else { + } else if (musicViewModel.themeSelected.intValue == 3) { val bitmap = musicViewModel.getCurrentMusicCover() if (bitmap != null) { Palette.from(bitmap).generate { palette -> // 从 Palette 中获取颜色信息 @@ -150,9 +147,14 @@ fun MusicPitchTheme( context ) else dynamicLightColorScheme(context) else if (darkTheme) DarkColorScheme else LightColorScheme } + } else if (musicViewModel.themeSelected.intValue == 
4) { + if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.S) { + colorScheme.value = + if (darkTheme) dynamicDarkColorScheme(context) else dynamicLightColorScheme( + context + ) + } } - - } val view = LocalView.current if (!view.isInEditMode) { diff --git a/app/src/main/java/com/ztftrue/music/utils/Utils.kt b/app/src/main/java/com/ztftrue/music/utils/Utils.kt index b471551..f79105b 100644 --- a/app/src/main/java/com/ztftrue/music/utils/Utils.kt +++ b/app/src/main/java/com/ztftrue/music/utils/Utils.kt @@ -10,6 +10,7 @@ import android.text.TextUtils import android.widget.Toast import androidx.core.content.FileProvider import com.ztftrue.music.MusicViewModel +import com.ztftrue.music.R import com.ztftrue.music.play.ACTION_AddPlayQueue import com.ztftrue.music.play.ACTION_GET_TRACKS import com.ztftrue.music.play.ACTION_PlayLIST_CHANGE @@ -25,7 +26,6 @@ import java.io.FileInputStream import java.io.FileOutputStream import java.io.IOException - enum class OperateTypeInActivity { DeletePlayList, InsertTrackToPlaylist, @@ -46,7 +46,6 @@ enum class PlayListType { None } - fun enumToStringForPlayListType(myEnum: PlayListType): String { return myEnum.name } @@ -88,7 +87,26 @@ enum class ScrollDirectionType { @Suppress("deprecation") object Utils { - val items = listOf("Follow System", "Light", "Dark", "Follow Music Cover") + val translateMap = HashMap().apply { + put("Songs", R.string.tab_songs) + put("PlayLists", R.string.tab_playLists) + put("Queue", R.string.tab_queue) + put("Albums", R.string.tab_albums) + put("Artists", R.string.tab_artists) + put("Genres", R.string.tab_genres) + put("Folders", R.string.tab_folders) + put("Cover", R.string.tab_cover) + put("Lyrics", R.string.tab_lyrics) + put("Equalizer", R.string.tab_equalizer) + } + + val items = listOf( + R.string.theme_follow_system, + R.string.theme_light, + R.string.theme_dark, + R.string.theme_follow_music_cover, + R.string.theme_material_you, + ) var kThirdOct = doubleArrayOf( 31.5, 63.0, 125.0, 250.0, 500.0, 1000.0, 2000.0, 4000.0, 8000.0, 16000.0 ) @@ -103,7 +121,7 @@ object Utils { ) - fun initSettingsData(musicViewModel: MusicViewModel,context: Context) { + fun initSettingsData(musicViewModel: MusicViewModel, context: Context) { CoroutineScope(Dispatchers.IO).launch { musicViewModel.themeSelected.intValue = context.getSharedPreferences( @@ -155,8 +173,6 @@ object Utils { } - - fun openFile(path: String, minType: String = "text/plain", context: Context) { val fileUri: Uri val outputImage = File(path) @@ -261,7 +277,7 @@ object Utils { } ) } else { - Toast.makeText(context, "创建失败", Toast.LENGTH_SHORT) + Toast.makeText(context, context.getString(R.string.create_failed), Toast.LENGTH_SHORT) .show() } } @@ -281,7 +297,7 @@ object Utils { ACTION_PlayLIST_CHANGE, null, null ) } else { - Toast.makeText(context, "创建失败", Toast.LENGTH_SHORT) + Toast.makeText(context, context.getString(R.string.create_failed), Toast.LENGTH_SHORT) .show() } } diff --git a/app/src/main/java/com/ztftrue/music/utils/trackManager/PlaylistManager.kt b/app/src/main/java/com/ztftrue/music/utils/trackManager/PlaylistManager.kt index a3733be..58e20ac 100644 --- a/app/src/main/java/com/ztftrue/music/utils/trackManager/PlaylistManager.kt +++ b/app/src/main/java/com/ztftrue/music/utils/trackManager/PlaylistManager.kt @@ -21,6 +21,7 @@ import androidx.annotation.RequiresApi import androidx.media.MediaBrowserServiceCompat import androidx.media3.common.util.UnstableApi import com.ztftrue.music.MainActivity +import com.ztftrue.music.R import 
com.ztftrue.music.sqlData.model.MusicItem import com.ztftrue.music.utils.model.MusicPlayList import com.ztftrue.music.utils.OperateTypeInActivity @@ -215,7 +216,6 @@ object PlaylistManager { bundle.putString("action", OperateTypeInActivity.InsertTrackToPlaylist.name) bundle.putParcelable("uri", uri) bundle.putParcelableArrayList("values", contentValues) - Log.i("addMusicsToPlaylist", uri.toString()) try { val pendingIntent = MediaStore.createWriteRequest(resolver, listOf(uri)) val intentSenderRequest: IntentSenderRequest = @@ -465,7 +465,8 @@ object PlaylistManager { .build() context.modifyMediaLauncher.launch(intentSenderRequest) } catch (e: Exception) { - Toast.makeText(context, "You system don't support this feature", Toast.LENGTH_SHORT) + Toast.makeText(context, + context.getString(R.string.cant_support_feature_tip), Toast.LENGTH_SHORT) .show() e.printStackTrace() } diff --git a/app/src/main/res/values-zh/strings.xml b/app/src/main/res/values-zh/strings.xml new file mode 100644 index 0000000..2f3584a --- /dev/null +++ b/app/src/main/res/values-zh/strings.xml @@ -0,0 +1,102 @@ + + 小怪兽音乐 + + 专辑: + 艺术家: + 歌词作者-作曲家: + LRC贡献者: + Re: + 标题: + Version: + 没有歌词,请点击导入.\n 支持 LRC/VTT/SRT/TXT 格式的文件 + 或者, 点击设置歌词文件夹 + 管理选项卡 + 优先选择内置歌词 + 关于 + 选择主题: + 警告: 不是所有歌曲都在这里显示,例如,这里不显示 Ringtones 和 Notifications 文件夹 + 艺术家: %1$s + 专辑: %1$s + 封面 + 操作 + 添加到队列 + 播放下一首 + 添加到播放列表 + "音调: " + 重置 + "速度: " + 回音 + "延迟: " + "衰减: " + 均衡器 + 展平失败 + 展平 + 没有权限,无法获取音乐,点击设置权限 + %1$s 歌曲%2$s + 设置 + 反馈 + 退出APP + + 到Github + 发送邮件 + 错误提示 + 出现意外错误 + 播放通知 + 没有歌曲 + %1$s\n 分钟 + 等待歌曲播放结束 + 清除当前队列 + 保存当前队列到列表 + 修改当前队列 + 从存储中删除 + 歌曲信息 + 从列表中移除移除 + 从队列中移除 + 添加新的播放列表 + 源代码 + 确定 + 重命名播放列表 + 删除播放列表 + 滚动 + 高亮 + 管理单词软件 + 显示 + 自动 + 获取时长失败 + 创建播放列表 + 取消 + 确认删除 %1$s? + 添加 %1$s 到播放列表 + 睡眠时间 + 倒计时: %1$s + 操作 %1$s + + 停止 + 输入名称 + 在此输入文本 + + 流派 + 搜索 + 歌曲 + 播放列表 + 当前队列 + 专辑 + 艺术家 + 流派 + 文件夹 + 封面 + 歌词 + 音效 + 跟随系统 + + + 跟随音乐封面 + Material You + 输入分钟数 + 标题 + 创建失败 + 多次播放错误, 暂停播放 + 播放错误, 自动播放下一首 + 没有安装字典应用, 或者已安装的应用不支持这个功能 + 当前系统不支持这个功能 + \ No newline at end of file diff --git a/app/src/main/res/values/strings.xml b/app/src/main/res/values/strings.xml index 6b8a1f2..9c9d04a 100644 --- a/app/src/main/res/values/strings.xml +++ b/app/src/main/res/values/strings.xml @@ -8,4 +8,95 @@ Re: Title: Version: + No Lyrics, Click to import lyrics.\n Support LRC/VTT/SRT/TXT + Or, Click to set lyrics folder + Manage tab items + Prefer embedded lyrics + About + Select theme: + Warning: Not all tracks can be show in other tabs, they don\'t show that in Ringtones or Notifications folders + artist: %1$s + album: %1$s + Cover + Operate + Add to queue + Play next + Add to playlist + Pitch + Reset + Speed + Echo + "Delay: " + "Decay: " + Equalizer + flatten failed + Flatten + Can\'t find any audio file\n I need permission\nClick here to open settings + %1$s song%2$s + Settings + FeedBack + Exit APP + + To github + Send email + Error tip + error_feedback_tip + PlayNotify + No music + %1$s\n minutes + Play completed last song + Clear current queue + Save current queue to playlist + Operate current queue + Delete from storage + Music info + Remove from current playlist + Remove from queue + Add New PlayList + SourceCode + Confirm + Rename playlist + Delete playlist + Scroll + Highlight + Manage Dictionary App + Show + Auto go + Get duration failed + Create PlayList + Cancel + Confirm to delete %1$s? 
+ Add %1$s Music To PlayList + Sleep timer + Running: %1$s + Operate %1$s + seconds + Stop + Enter name + Enter text to search + Year + Genre + Search + Songs + PlayLists + Queue + Albums + Artists + Genres + Folders + Cover + Lyrics + Equalizer + + Light + Dark + Follow Music Cover + Material You + Enter minutes + Title + Creation failed + Repeated playback errors, playback paused + Playback error, automatically playing next + Dictionary app is not installed or your dictionary app does not support this feature. + Your system doesn\'t support this feature \ No newline at end of file diff --git a/settings.gradle.kts b/settings.gradle.kts index 7180ee0..2b37913 100644 --- a/settings.gradle.kts +++ b/settings.gradle.kts @@ -10,10 +10,6 @@ dependencyResolutionManagement { repositories { google() mavenCentral() - maven( - // for Tarsos - url = "https://mvn.0110.be/releases" - ) maven( url = "https://jitpack.io" )
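
A note on the localization pattern applied throughout these changes: hard-coded UI labels are replaced with stringResource(...) lookups, and tab titles that are stored as English names ("Songs", "Albums", ...) are resolved through Utils.translateMap, falling back to R.string.app_name when a name has no mapping. The following is a minimal sketch of that lookup, not part of the diff itself; it assumes the project's Utils.translateMap and the string resources added in values/strings.xml and values-zh/strings.xml.

import androidx.compose.material3.MaterialTheme
import androidx.compose.material3.Text
import androidx.compose.runtime.Composable
import androidx.compose.ui.res.stringResource
import com.ztftrue.music.R
import com.ztftrue.music.utils.Utils

// Sketch only: renders the localized label for a tab whose name is stored
// as an English key, mirroring the pattern used in PlayingPage.kt.
@Composable
fun TabTitle(tabName: String) {
    // Look up the resource ID for the stored name; unknown keys fall back
    // to the app name rather than crashing or showing a raw key.
    val resId = Utils.translateMap[tabName] ?: R.string.app_name
    Text(
        text = stringResource(id = resId),
        color = MaterialTheme.colorScheme.onBackground,
    )
}

With this pattern, adding a new tab name only requires an entry in Utils.translateMap plus a string in each strings.xml variant; the Compose code itself stays locale-agnostic.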