Changeset 1556
- Timestamp:
- 05/10/21 11:21:12 (3 years ago)
- File: 1 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/org/apollo/widgets/SampledTrack.java
r1516 r1556 15 15 import java.awt.geom.Rectangle2D; 16 16 import java.io.File; 17 import java.io.FileInputStream; 17 18 import java.io.IOException; 19 import java.io.InputStream; 18 20 import java.lang.reflect.InvocationTargetException; 19 21 import java.util.LinkedList; … … 21 23 22 24 import javax.sound.sampled.AudioFormat; 25 import javax.sound.sampled.AudioFormat.Encoding; 26 import javax.sound.sampled.AudioInputStream; 23 27 import javax.sound.sampled.LineUnavailableException; 24 28 import javax.sound.sampled.UnsupportedAudioFileException; … … 48 52 import org.apollo.io.AudioPathManager; 49 53 import org.apollo.io.IconRepository; 54 import org.apollo.io.LoadedAudioData; 50 55 import org.apollo.items.EmulatedTextItem; 51 56 import org.apollo.items.EmulatedTextItem.TextChangeListener; … … 80 85 import org.expeditee.items.widgets.Widget; 81 86 87 88 import javax.sound.sampled.AudioFormat; 89 import javax.sound.sampled.AudioSystem; 90 import javax.sound.sampled.LineUnavailableException; 91 import javax.sound.sampled.UnsupportedAudioFileException; 92 93 import be.tarsos.dsp.AudioDispatcher; 94 import be.tarsos.dsp.GainProcessor; 95 import be.tarsos.dsp.WaveformSimilarityBasedOverlapAdd; 96 import be.tarsos.dsp.WaveformSimilarityBasedOverlapAdd.Parameters; 97 import be.tarsos.dsp.io.TarsosDSPAudioFormat; 98 import be.tarsos.dsp.io.UniversalAudioInputStream; 99 import be.tarsos.dsp.io.jvm.AudioDispatcherFactory; 100 import be.tarsos.dsp.io.jvm.AudioPlayer; 101 import be.tarsos.dsp.io.jvm.WaveformWriter; 102 82 103 /** 83 104 * The sampled track widgets in apollo. … … 2072 2093 } 2073 2094 2095 // Adapted from: 2096 // https://stackoverflow.com/questions/50631179/converting-stereo-to-mono-using-tarsosdsp-does-not-work 2097 2098 private UniversalAudioInputStream convertToMono(AudioInputStream sourceStream) 2099 { 2100 AudioInputStream targetStream = null; 2101 2102 AudioFormat sourceFormat = sourceStream.getFormat(); 2103 2104 // is already mono? 
2105 /*if(sourceFormat.getChannels() == 1) { 2106 return sourceStream; 2107 } 2108 */ 2109 if (sourceFormat.getChannels() > 1) { 2110 2111 AudioFormat targetFormat = new AudioFormat( 2112 sourceFormat.getEncoding(), 2113 sourceFormat.getSampleRate(), 2114 sourceFormat.getSampleSizeInBits(), 2115 1, 2116 // this is the important bit, the framesize needs to change as well, 2117 // for framesize 4, this calculation leads to new framesize 2 2118 (sourceFormat.getSampleSizeInBits() + 7) / 8, 2119 sourceFormat.getFrameRate(), 2120 sourceFormat.isBigEndian()); 2121 2122 targetStream = AudioSystem.getAudioInputStream(targetFormat, sourceStream); 2123 } 2124 else { 2125 targetStream = sourceStream; 2126 } 2127 2128 // better to express in terms of targetFormat !!! 2129 boolean is_signed = (sourceFormat.getEncoding() == Encoding.PCM_SIGNED); 2130 2131 TarsosDSPAudioFormat ts_audioFormat = new TarsosDSPAudioFormat( 2132 /* sample rate */ sourceFormat.getSampleRate(), 2133 /* HERE sample size in bits */ sourceFormat.getSampleSizeInBits(), 2134 /* number of channels */ 1, 2135 /* signed/unsigned data */ is_signed, 2136 /* big-endian byte order */sourceFormat.isBigEndian() 2137 ); 2138 2139 UniversalAudioInputStream uis = new UniversalAudioInputStream(targetStream, ts_audioFormat); 2140 2141 return uis; 2142 } 2143 2144 private SampledTrackModel timeStretch(SampledTrackModel orig_track_model, double time_stretch_factor) 2145 { 2146 SampledTrackModel stretched_track = null; 2147 2148 try { 2149 String stretched_local_target = null; 2150 File stretched_target_file = null; 2151 boolean orig_is_already_timestretched = false; 2152 2153 2154 String orig_source = orig_track_model.getFilepath(); 2155 String orig_local_filename = orig_track_model.getLocalFilename(); 2156 2157 String orig_source_root = orig_source.substring(0,orig_source.lastIndexOf('.')); 2158 2159 if (orig_source_root.matches("-ts[0-9]+(\\.[0-9]+)$")) { 2160 // already hooked up to a time-stretched file 2161 
stretched_local_target = orig_local_filename; 2162 stretched_target_file = new File(orig_source); 2163 2164 orig_is_already_timestretched = true; 2165 } 2166 2167 if (!orig_is_already_timestretched) { 2168 2169 // Need to make 44.1 kHz mono version (if does not already exist) 2170 2171 2172 String orig_local_filename_root = orig_local_filename.substring(0,orig_local_filename.lastIndexOf('.')); 2173 2174 String stretched_target = orig_source_root + "-ts" + time_stretch_factor + ".wav"; 2175 stretched_local_target = orig_local_filename_root + "-ts" + time_stretch_factor + ".wav"; 2176 2177 System.out.println("***** Checking for existence of target output wav file: " + stretched_target); 2178 2179 stretched_target_file = new File(stretched_target); 2180 if (!stretched_target_file.exists()) { 2181 2182 File orig_source_file = new File(orig_source); 2183 //AudioFormat orig_format = AudioSystem.getAudioFileFormat(orig_source_file).getFormat(); 2184 2185 AudioInputStream orig_audio_input_stream = AudioSystem.getAudioInputStream(orig_source_file); 2186 2187 UniversalAudioInputStream mono_audio_input_stream = convertToMono(orig_audio_input_stream); 2188 TarsosDSPAudioFormat ts_audioFormat = mono_audio_input_stream.getFormat(); 2189 2190 //AudioFormat mono_format = mono_audio_input_stream.getFormat(); 2191 2192 2193 2194 2195 //double orig_sample_rate = orig_format.getSampleRate(); 2196 //double mono_sample_rate = mono_format.getSampleRate(); 2197 2198 2199 //https://stackoverflow.com/questions/31365172/tarsosdsp-pitch-detection-from-wav-file-and-the-result-frequency-is-always-les 2200 /* 2201 InputStream is = new FileInputStream(orig_source_file); 2202 2203 TarsosDSPAudioFormat ts_audioFormat = new TarsosDSPAudioFormat( 2204 / * sample rate * / 44100, 2205 / * HERE sample size in bits * / 16, // used to be 32 2206 / * number of channels * / 1, 2207 / * signed/unsigned data * / true, 2208 / * big-endian byte order * / false 2209 ); 2210 UniversalAudioInputStream uis = new 
UniversalAudioInputStream(is, ts_audioFormat); 2211 2212 */ 2213 2214 //AudioDispatcher dispatcher = new AudioDispatcher(uis, 2048, 0); 2215 2216 2217 2218 // Consider allowing finer grained control of WSOLA params through @annotations: 2219 // int sequenceMs 2220 // int seekWindowMs 2221 // int overlapMs 2222 float ts_sample_rate = ts_audioFormat.getSampleRate(); 2223 Parameters slowdown_params = Parameters.slowdownDefaults(time_stretch_factor,ts_sample_rate); 2224 WaveformSimilarityBasedOverlapAdd wsola = new WaveformSimilarityBasedOverlapAdd(slowdown_params); 2225 2226 WaveformWriter stretched_target_writer = new WaveformWriter(ts_audioFormat,stretched_target); 2227 2228 //AudioDispatcher dispatcher = AudioDispatcherFactory.fromFile(orig_source_file,wsola.getInputBufferSize(),wsola.getOverlap()); 2229 AudioDispatcher dispatcher = new AudioDispatcher(mono_audio_input_stream,wsola.getInputBufferSize(),wsola.getOverlap()); 2230 wsola.setDispatcher(dispatcher); 2231 dispatcher.addAudioProcessor(wsola); 2232 dispatcher.addAudioProcessor(stretched_target_writer); 2233 System.out.println("Starting TimeStetch with factor: " + time_stretch_factor + "..."); 2234 dispatcher.run(); 2235 System.out.println("... 
done"); 2236 2237 //AudioFormat stretched_target_format = AudioSystem.getAudioFileFormat(stretched_target_file).getFormat(); 2238 2239 } 2240 2241 LoadedAudioData stretched_audio_data = AudioIO.loadAudioFile(stretched_target_file, null); 2242 2243 // loading audio data could have been cancelled 2244 if (stretched_audio_data != null) { 2245 // Create the track 2246 stretched_track = new SampledTrackModel( 2247 stretched_audio_data.getAudioBytes(), 2248 stretched_audio_data.getAudioFormat(), 2249 stretched_local_target); 2250 2251 stretched_track.setFilepath(stretched_target); 2252 } 2253 else { 2254 System.out.println("Cancelled loading in time-stretched WAV file: " + stretched_target) 2255 ; } 2256 } 2257 2258 } 2259 catch (Exception e) { 2260 e.printStackTrace(); 2261 } 2262 2263 return stretched_track; 2264 } 2265 2074 2266 private void playPauseResume() throws LineUnavailableException { 2075 2267 if (!SoundDesk.getInstance().isPlaying(trackMix.getChannelID())) { // play / resume … … 2113 2305 2114 2306 if (startFrame < endFrame) { 2307 2308 // Conditions are right, to play! 2309 2310 /* 2311 Frame current = DisplayController.getCurrentFrame(); 2312 // Or perhaps (??) 
we should be getting the current frame via: 2313 //Frame current = getSource().getParentOrCurrentFrame(); 2314 2315 //Text time_stretch_annotation = current.getAnnotation("TimeStretchFactor"); 2316 String time_stretch_annotation_valuestr = current.getAnnotationValue("TimeStretchFactor"); 2317 2318 if (time_stretch_annotation_valuestr != null) { 2319 //String time_stretch_str = time_stretch_annotation.getText(); 2320 //time_stretch_str = time_stretch_str.replaceFirst("^\\s*@time-stretch-factor(:?)\\s*",""); 2321 //System.out.println("**** time stretch str = " + time_stretch_annotation_valuestr); 2322 2323 double time_stretch = Double.parseDouble(time_stretch_annotation_valuestr); 2324 2325 if (time_stretch != 1.0) { 2326 SampledTrackModel stretched_track_model = timeStretch(trackModel,time_stretch); 2327 if (stretched_track_model != null) { 2328 trackModel = stretched_track_model; 2329 } 2330 } 2331 } 2332 */ 2333 2334 2115 2335 SoundDesk.getInstance().playSampledTrackModel( 2116 2336 trackModel,
Note:
See TracChangeset
for help on using the changeset viewer.