Timestamp:
    03/08/16 23:22:09 (8 years ago)
Location:
    trunk/src/org/apollo
Files:
    6 edited
Legend (in the diffs below):
    (unprefixed)  Unmodified
    +             Added
    -             Removed
trunk/src/org/apollo/audio/ApolloPlaybackMixer.java
(r375 → r1007)

      if (playbackThread == null) {

-         playbackThread = new PlaybackThread();
+         AudioFormat audio_format = SampledAudioManager.getInstance().getDefaultPlaybackFormat();
+         if (audio_format.getChannels() == 2) {
+             playbackThread = new StereoPlaybackThread();
+         }
+         else {
+             playbackThread = new MonoPlaybackThread();
+         }
          playbackThread.start();
…
       *
       */
-     private class PlaybackThread extends Thread {
+     private abstract class PlaybackThread extends Thread {

-         private SourceDataLine srcDataLine; // never null
+         protected SourceDataLine srcDataLine; // never null

-         private boolean isStopping = false;
+         protected boolean isStopping = false;

-         private int bufferFrameLength;
-         private boolean isOutputBigEndian;
+         protected int bufferFrameLength;
+         protected boolean isOutputBigEndian;
…
       * @throws LineUnavailableException
       */
-         PlaybackThread() throws LineUnavailableException {
-             super("Apollo Playback Mixer Thread");
+         protected PlaybackThread(String threadName) throws LineUnavailableException {
+             super(threadName);
              super.setPriority(Thread.MAX_PRIORITY);
…
       * If failed to acquire the source data line.
       */
-         private void aquireSourceDataLine() throws LineUnavailableException {
+         protected void aquireSourceDataLine() throws LineUnavailableException {

              // Select an audio output format
…
              // Cache useful data
-             bufferFrameLength = srcDataLine.getBufferSize() / 2;
+             bufferFrameLength = srcDataLine.getBufferSize() / (2*2); // 2=stereo, 2=16-bit
              isOutputBigEndian = srcDataLine.getFormat().isBigEndian();
…
          /**
-          * Note: even if all tracks have been proccessed in the audio pipeline, it will
+          * Note: even if all tracks have been processed in the audio pipeline, it will
           * commence another pass to check for new tracks added to the graph before finishing.
           *
…
           * @return the best audio format for playback...
           */
-         private AudioFormat getAudioFormat() {
+         protected AudioFormat getAudioFormat() {
              return SampledAudioManager.getInstance().getDefaultPlaybackFormat();
          }

+         /**
+          * The audio mixing pipeline
+          */
+         public abstract void run();
+     }
+
+     private class StereoPlaybackThread extends PlaybackThread {
+
+         /**
+          * Instantly prepares for audio playback: opens the (stereo) source data line for output
+          *
+          * @throws LineUnavailableException
+          */
+         StereoPlaybackThread() throws LineUnavailableException {
+             super("Apollo Stereo Playback Mixer Thread");
+         }
+
+         /**
+          * The audio mixing pipeline
+          */
+         public void run() {
+
+             // Notify observers that some audio has started playing
+             ApolloPlaybackMixer.this.fireSubjectChangedLaterOnSwingThread(
+                 new SubjectChangedEvent(ApolloSubjectChangedEvent.PLAYBACK_STARTED));
+
+             // All tracks to play per pass
+             List<TrackSequence> tracksToPlay = new LinkedList<TrackSequence>();
+
+             // Keeps track of tracks to remove
+             List<TrackSequence> completedTracks = new LinkedList<TrackSequence>();
+
+             // The buffer written directly to the source data line
+             byte[] sampleBuffer = new byte[2 * 2 * bufferFrameLength]; // 2=stereo, 2=16-bit samples
+
+             // The mixed frames, where each element refers to a frame
+             int[] mixedFrameBufferL = new int[bufferFrameLength];
+             int[] mixedFrameBufferR = new int[bufferFrameLength];
+
+             // Helpers declared outside the loop for efficiency
+             int msbL, lsbL;
+             int msbR, lsbR;
+             int sampleL;
+             int sampleR;
+             int totalFramesMixed;
+             int trackCount;       // tracks to play at a given pass
+             boolean isMoreQueued; // true if there are more tracks queued
+             int frameIndex;
+             int i;
+
+             // Begin writing to the source data line
+             if (srcDataLine.isOpen())
+                 srcDataLine.start();
+             else return;
+
+             // Keep playing as long as the line is open (and there is something to play)
+             try
+             {
+                 while (srcDataLine.isOpen()) { // the audio mixing pipeline
+
+                     // First decide on which tracks to play ... and remove any finished tracks.
+                     synchronized(sequenceGraph) {
+
+                         // If there are no more tracks queued for playing, then exit the
+                         // playback thread.
+                         if (sequenceGraph.isEmpty())
+                             return;
+
+                         isMoreQueued = false;
+                         completedTracks.clear();
+                         tracksToPlay.clear();
+
+                         for (TrackSequence ts : sequenceGraph) {
+
+                             // Has this track sequence finished?
+                             if (ts.currentFrame > ts.endFrame || ts.stopPending)
+                                 completedTracks.add(ts);
+
+                             // Is this track playing / meant to start playing in this pass?
+                             else if (ts.initiationFrame <= (timelineFrame + bufferFrameLength))
+                                 tracksToPlay.add(ts);
+
+                             // If it is not yet time to play this track, then it is not time
+                             // for any of the tracks after it either, since they are ordered
+                             // by their initiation time.
+                             else break;
+                         }
+
+                         // Get rid of tracks that have finished playing. Notify models that they have stopped
+                         for (TrackSequence staleTS : completedTracks) {
+
+                             sequenceGraph.remove(staleTS);
+
+                             staleTS.onStopped((staleTS.currentFrame > staleTS.endFrame)
+                                 ? staleTS.endFrame : staleTS.currentFrame);
+
+                             //removeTrackFromGraph(staleTS, staleTS.endFrame);
+                         }
+
+                         trackCount = tracksToPlay.size();
+                         isMoreQueued = sequenceGraph.size() > trackCount;
+
+                         // If there is nothing queued and there are no tracks to play,
+                         // then playback is finished.
+                         if (!isMoreQueued && trackCount == 0)
+                             return;
+
+                     } // release lock
+
+                     totalFramesMixed = 0; // will be set to the maximum amount of frames mixed across all tracks
+
+                     // Clear audio buffer
+                     for (i = 0; i < bufferFrameLength; i++) {
+                         // TODO: Efficient way of clearing buffer?
+                         mixedFrameBufferL[i] = 0;
+                         mixedFrameBufferR[i] = 0;
+                     }
+
+                     // Perform mixing:
+                     // Convert the sample size to 16-bit always for best precision while
+                     // processing audio in the mix pipeline....
+                     for (TrackSequence ts : tracksToPlay) {
+
+                         // Notify model that initiated
+                         if (!ts.isPlaying()) ts.onInitiated(timelineFrame);
+
+                         // Skip muted / unsoloed tracks - they add nothing to the sample mix
+                         if (ts.isMuted || (isSoloEnable && !ts.isSolo)) {
+
+                             // Make sure it starts where initiated, if not already initiated
+                             if (ts.initiationFrame >= timelineFrame && ts.initiationFrame < (timelineFrame + bufferFrameLength)) {
+
+                                 // Get index in frame buffer where to initiate
+                                 frameIndex = (int)(ts.initiationFrame - timelineFrame);
+
+                                 // Calculate the length of frames to buffer - adjust the silent track's position
+                                 ts.currentFrame += (bufferFrameLength - frameIndex);
+
+                             } else { // skip full buffer of bytes ... silenced
+
+                                 ts.currentFrame += bufferFrameLength; // currentFrame can go outside the endFrame boundary of the track
+                             }
+
+                             totalFramesMixed = bufferFrameLength;
+
+                         } else { // Get samples and add to mix
+
+                             // If the track is yet to initiate - part way through the buffer - then start adding bytes
+                             // at the initiation point
+                             if (ts.initiationFrame >= timelineFrame && ts.initiationFrame < (timelineFrame + bufferFrameLength)) {
+                                 frameIndex = (int)(ts.initiationFrame - timelineFrame);
+                             } else {
+                                 frameIndex = 0;
+                             }
+
+                             // For each frame
+                             for (; frameIndex < bufferFrameLength && ts.currentFrame <= ts.endFrame; frameIndex++) {
+
+                                 // Get sample according to byte order
+                                 int base_posL = ts.currentFrame * (2*2); // 2=stereo, 2=16-bit
+                                 int base_posR = base_posL + 2;
+
+                                 if (ts.isBigEndian) {
+
+                                     // First byte is MSB (high order)
+                                     msbL = (int)ts.playbackAudioBytes[base_posL];
+
+                                     // Second byte is LSB (low order)
+                                     lsbL = (int)ts.playbackAudioBytes[base_posL + 1];
+
+                                     // And again for the right channel
+                                     msbR = (int)ts.playbackAudioBytes[base_posR];
+                                     lsbR = (int)ts.playbackAudioBytes[base_posR + 1];
+
+                                 } else {
+
+                                     // First byte is LSB (low order)
+                                     lsbL = (int)ts.playbackAudioBytes[base_posL];
+
+                                     // Second byte is MSB (high order)
+                                     msbL = (int)ts.playbackAudioBytes[base_posL + 1];
+
+                                     // And again for the right channel
+                                     lsbR = (int)ts.playbackAudioBytes[base_posR];
+                                     msbR = (int)ts.playbackAudioBytes[base_posR + 1];
+                                 }
+
+                                 sampleL = (msbL << 0x8) | (0xFF & lsbL);
+                                 sampleR = (msbR << 0x8) | (0xFF & lsbR);
+
+                                 // Apply track volume
+                                 sampleL = (int)(sampleL * ts.volume);
+                                 sampleR = (int)(sampleR * ts.volume);
+
+                                 // Add to current mix
+                                 mixedFrameBufferL[frameIndex] += sampleL;
+                                 mixedFrameBufferR[frameIndex] += sampleR;
+
+                                 // Get next sample
+                                 ts.currentFrame++;
+                             }
+
+                             // Keep track of total frames mixed in buffer
+                             if (frameIndex > totalFramesMixed)
+                                 totalFramesMixed = frameIndex;
+                         }
+
+                     } // Mix in next track
+
+                     // totalFramesMixed is the amount of frames to play.
+                     // If it is zero then there are tracks yet to be initiated, and nothing currently playing
+                     assert (totalFramesMixed <= bufferFrameLength);
+                     assert (totalFramesMixed > 0 ||
+                         (totalFramesMixed == 0 && trackCount == 0 && isMoreQueued));
+
+                     // Post mix with master settings
+                     if (isMasterMuteOn) { // Silence sample buffer if master mute is on
+
+                         for (i = 0; i < sampleBuffer.length; i++) {
+                             sampleBuffer[i] = 0;
+                         }
+
+                         // Let the muted bytes play
+                         totalFramesMixed = bufferFrameLength;
+
+                     } else { // otherwise apply master volume
+
+                         for (i = 0; i < totalFramesMixed; i++) {
+
+                             // Average tracks
+                             //mixedFrameBuffer[i] /= trackCount; // deprecated
+
+                             // Apply master volume
+                             mixedFrameBufferL[i] = (int)(mixedFrameBufferL[i] * masterVolume);
+                             mixedFrameBufferR[i] = (int)(mixedFrameBufferR[i] * masterVolume);
+
+                             // Clip
+                             if (mixedFrameBufferL[i] > Short.MAX_VALUE) mixedFrameBufferL[i] = Short.MAX_VALUE;
+                             else if (mixedFrameBufferL[i] < Short.MIN_VALUE) mixedFrameBufferL[i] = Short.MIN_VALUE;
+
+                             if (mixedFrameBufferR[i] > Short.MAX_VALUE) mixedFrameBufferR[i] = Short.MAX_VALUE;
+                             else if (mixedFrameBufferR[i] < Short.MIN_VALUE) mixedFrameBufferR[i] = Short.MIN_VALUE;
+
+                             // Convert to output format
+                             lsbL = (mixedFrameBufferL[i] & 0xFF);
+                             msbL = ((mixedFrameBufferL[i] >> 8) & 0xFF);
+                             lsbR = (mixedFrameBufferR[i] & 0xFF);
+                             msbR = ((mixedFrameBufferR[i] >> 8) & 0xFF);
+
+                             int base_posL = i * (2 * 2); // 2=stereo, 2=16-bit
+                             int base_posR = base_posL + 2;
+                             if (isOutputBigEndian) {
+                                 sampleBuffer[base_posL]   = (byte)msbL;
+                                 sampleBuffer[base_posL+1] = (byte)lsbL;
+                                 sampleBuffer[base_posR]   = (byte)msbR;
+                                 sampleBuffer[base_posR+1] = (byte)lsbR;
+                             } else {
+                                 sampleBuffer[base_posL]   = (byte)lsbL;
+                                 sampleBuffer[base_posL+1] = (byte)msbL;
+                                 sampleBuffer[base_posR]   = (byte)lsbR;
+                                 sampleBuffer[base_posR+1] = (byte)msbR;
+                             }
+                         }
+                     }
+
+                     // Generate silence only if there are more tracks to be played.
+                     // Note that this could be false, but a track might have been queued after
+                     // setting the isMoreQueued flag. In such cases... silence is not wanted anyway!
+                     if (isMoreQueued) {
+                         for (i = totalFramesMixed; i < bufferFrameLength; i++) { // will skip if no need to generate silence
+                             int base_posL = i * (2 * 2); // 2=stereo, 2=16-bit
+                             int base_posR = base_posL + 2;
+
+                             sampleBuffer[base_posL]   = 0;
+                             sampleBuffer[base_posL+1] = 0;
+                             sampleBuffer[base_posR]   = 0;
+                             sampleBuffer[base_posR+1] = 0;
+                         }
+                         // Ensure that the full buffer is played ... including the silence
+                         totalFramesMixed = bufferFrameLength;
+                     }
+
+                     // Write processed bytes to the line-out stream and update the timeline frame
+                     srcDataLine.write(
+                         sampleBuffer,
+                         0,
+                         totalFramesMixed * (2 * 2)); // 2=stereo, 2=16-bit
+
+                     // Update timeline counter for sequencing management
+                     timelineFrame += totalFramesMixed;
+
+                     // The timelineFrame should always be larger than or equal to the live frame position
+                     assert(timelineFrame >= srcDataLine.getLongFramePosition());
+
+                 } // Next pass
+
+             } finally {
+
+                 isStopping = true;
+
+                 // Ensure line is freed
+                 if (srcDataLine.isOpen()) {
+                     srcDataLine.drain(); // avoids chopping off last buffered chunk
+                     srcDataLine.close();
+                 }
+
+                 // Clear sequence graph.
+                 synchronized(sequenceGraph) {
+
+                     for (TrackSequence track : sequenceGraph) {
+                         track.onStopped((track.currentFrame > track.endFrame)
+                             ? track.endFrame : track.currentFrame);
+                     }
+
+                     sequenceGraph.clear();
+                 }
+
+                 // Notify observers that playback has finished.
+                 ApolloPlaybackMixer.this.fireSubjectChangedLaterOnSwingThread(
+                     new SubjectChangedEvent(ApolloSubjectChangedEvent.PLAYBACK_STOPPED));
+             }
+         }
+     }
+
+     private class MonoPlaybackThread extends PlaybackThread {
+
+         /**
+          * Instantly prepares for audio playback: opens the (mono) source data line for output
+          *
+          * @throws LineUnavailableException
+          */
+         MonoPlaybackThread() throws LineUnavailableException {
+             super("Apollo Mono Playback Mixer Thread");
+         }
+
          /**
           * The audio mixing pipeline
…
              // Clear audio buffer
-             for (i = 0; i < bufferFrameLength; i++) // TODO: Effecient way of clearing buffer?
+             for (i = 0; i < bufferFrameLength; i++) // TODO: Efficient way of clearing buffer?
                  mixedFrameBuffer[i] = 0;
…
              //mixedFrameBuffer[i] /= trackCount; // deprecated

-             // Apply mastar volume
+             // Apply master volume
              mixedFrameBuffer[i] = (int)(mixedFrameBuffer[i] * masterVolume);
…
-             // Write proccessed bytes to line out stream and update the timeline frame
+             // Write processed bytes to line out stream and update the timeline frame
              srcDataLine.write(
                  sampleBuffer,
…
          }

-
-
-     }
+     }
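The substance of this change is that playback frames are now interleaved stereo: each frame is four bytes (a 16-bit left sample followed by a 16-bit right sample), and the mixer decodes, sums, scales and re-encodes the two channels independently, honouring the byte order of both the track and the output line. The sketch below is not part of the changeset; it is a minimal, self-contained illustration of that per-frame round trip, with hypothetical names (decodeSample, encodeSample, mixFrame) and little-endian byte order assumed for brevity.

    // Minimal sketch (not Apollo code): decode, mix, clip and re-encode one
    // interleaved 16-bit stereo frame, little-endian. All names hypothetical.
    public final class StereoMixSketch {

        static final int BYTES_PER_FRAME = 2 * 2; // 2 channels x 2 bytes (16-bit)

        // Rebuild a signed 16-bit sample from two little-endian bytes,
        // mirroring the (msb << 0x8) | (0xFF & lsb) step in the changeset:
        // the MSB keeps its sign, the LSB must be masked to stay unsigned.
        static int decodeSample(byte[] pcm, int bytePos) {
            int lsb = pcm[bytePos];
            int msb = pcm[bytePos + 1];
            return (msb << 8) | (0xFF & lsb);
        }

        // Clip to the 16-bit range (as the mixer does) and write the sample
        // back as two little-endian bytes.
        static void encodeSample(byte[] pcm, int bytePos, int sample) {
            if (sample > Short.MAX_VALUE) sample = Short.MAX_VALUE;
            if (sample < Short.MIN_VALUE) sample = Short.MIN_VALUE;
            pcm[bytePos]     = (byte) (sample & 0xFF);
            pcm[bytePos + 1] = (byte) ((sample >> 8) & 0xFF);
        }

        // Mix frame 'frame' of 'src' (scaled by volume) into 'mix' in place.
        static void mixFrame(byte[] mix, byte[] src, int frame, float volume) {
            int posL = frame * BYTES_PER_FRAME; // left sample starts the frame
            int posR = posL + 2;                // right sample follows it
            encodeSample(mix, posL,
                    decodeSample(mix, posL) + (int) (decodeSample(src, posL) * volume));
            encodeSample(mix, posR,
                    decodeSample(mix, posR) + (int) (decodeSample(src, posR) * volume));
        }

        public static void main(String[] args) {
            byte[] mix = new byte[4];                              // one silent stereo frame
            byte[] src = { 0x10, 0x00, (byte) 0xF0, (byte) 0xFF }; // L=+16, R=-16
            mixFrame(mix, src, 0, 1.0f);
            System.out.println(decodeSample(mix, 0) + " " + decodeSample(mix, 2)); // 16 -16
        }
    }

One simplification to note: the sketch clips after every track, whereas the changeset accumulates all tracks into int buffers (mixedFrameBufferL/R) and clips only once, after applying the master volume, which avoids intermediate clipping when several loud tracks overlap.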
trunk/src/org/apollo/audio/SampledAudioManager.java
(r355 → r1007)

  import org.apollo.mvc.SubjectChangedEvent;
  import org.apollo.util.ApolloSystemLog;
+
…
      /** All internal formats have the same sample rate. */
-     public static final float PLAYBACK_SAMPLE_RATE = 44100.0f; // Audacity's default
-
-     //public static final float PLAYBACK_SAMPLE_RATE = 22050.0f; // Meldex's internal rate .. todo: fix conversions to use better rate
-
-     // Used for describing the ideal default format for recorded audio and converting un-supported
+     // The following is based on Audacity's defaults for playback
+     public static final float PLAYBACK_SAMPLE_RATE = 44100.0f;
+     public static final int PLAYBACK_BITS_PER_SAMPLE = 16;
+     public static final int PLAYBACK_NUM_CHANNELS = 2; // stereo
+
+     //public static final float PLAYBACK_SAMPLE_RATE = 22050.0f; // Meldex's internal rate .. todo: fix conversions to use better rate
+
+     // Used for describing the ideal default format for recorded audio and converting unsupported
      // imported audio to... The actual formats may differ depending on the data lines used

-     // note: Must be PCM, Mono, 16 bit.
+     // note: Must be PCM, with values as defined for PLAYBACK_... above
      private static final AudioFormat DESIRED_FORMAT = new AudioFormat( // Linear PCM Encoding
-         PLAYBACK_SAMPLE_RATE, // Always conform to PLAYBACK_SAMPLE_RATE
-         16, // bits per sample. Must be 16. Audacity's default
-         1,  // Always use mono. Audacity's default
+         PLAYBACK_SAMPLE_RATE, // Always conform to PLAYBACK_SAMPLE_RATE etc
+         PLAYBACK_BITS_PER_SAMPLE,
+         PLAYBACK_NUM_CHANNELS,
          false, // ALWAYS USE SIGNED FOR BEST PERFORMANCE - JAVA DOES NOT HAVE UNSIGNED TYPES
          true   // Byte order
…
      // Not a candidate if not in Apollo's format.
      if (!candiate.getEncoding().toString().startsWith("PCM")
-         || candiate.getChannels() != 1
-         || candiate.getSampleSizeInBits() != 16
+         || candiate.getChannels() != PLAYBACK_NUM_CHANNELS
+         || candiate.getSampleSizeInBits() != PLAYBACK_BITS_PER_SAMPLE
          || (candiate.getSampleRate() != AudioSystem.NOT_SPECIFIED &&
              candiate.getSampleRate() != PLAYBACK_SAMPLE_RATE))
…
      /**
       * Determines if an audio format requires conversion in order to be used
-      * in Apollos.
-      *
-      * Audio formats must be in PCM, mono, 16-bit sample-size,
+      * in Apollo.
+      *
+      * Audio formats must be in PCM, SampledAudioManager#PLAYBACK_NUM_CHANNELS,
+      * SampledAudioManager#PLAYBACK_BITS_PER_SAMPLE sample-size,
       * SampledAudioManager#PLAYBACK_SAMPLE_RATE sample-rate and be supported
       * by the output mixer.
…
      if (format == null) throw new NullPointerException("format");

-     if(!format.getEncoding().toString().startsWith("PCM") || format.getChannels() != 1
-         || format.getSampleSizeInBits() != 16
+     if(!format.getEncoding().toString().startsWith("PCM") || format.getChannels() != PLAYBACK_NUM_CHANNELS
+         || format.getSampleSizeInBits() != PLAYBACK_BITS_PER_SAMPLE
          || (format.getSampleRate() != AudioSystem.NOT_SPECIFIED &&
              format.getSampleRate() != PLAYBACK_SAMPLE_RATE)) {
…
      // Check that the format is supported by the output mixer
      for (AudioFormat supported : supportedPlaybackFormats) {
-         if (supported.getChannels() != 1) continue;
+         if (supported.getChannels() != PLAYBACK_NUM_CHANNELS) continue;
          if (
              format.getEncoding() == supported.getEncoding()
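Replacing the hardcoded mono/16-bit literals with PLAYBACK_NUM_CHANNELS and PLAYBACK_BITS_PER_SAMPLE means the "does this format already match?" test is written once against named constants. Below is a hedged, condensed sketch of that predicate, assembled from the checks visible in the diff; the class name and the omission of the supported-formats loop are mine, not the changeset's.

    import javax.sound.sampled.AudioFormat;
    import javax.sound.sampled.AudioSystem;

    // Condensed sketch of the format test seen in the diff. The additional
    // check against the output mixer's supported formats is omitted here.
    final class PlaybackFormatCheck {

        static final float PLAYBACK_SAMPLE_RATE = 44100.0f;
        static final int PLAYBACK_BITS_PER_SAMPLE = 16;
        static final int PLAYBACK_NUM_CHANNELS = 2; // stereo

        /** True if 'format' already matches the internal playback format. */
        static boolean matchesPlaybackFormat(AudioFormat format) {
            if (format == null) throw new NullPointerException("format");
            return format.getEncoding().toString().startsWith("PCM")
                    && format.getChannels() == PLAYBACK_NUM_CHANNELS
                    && format.getSampleSizeInBits() == PLAYBACK_BITS_PER_SAMPLE
                    && (format.getSampleRate() == AudioSystem.NOT_SPECIFIED
                            || format.getSampleRate() == PLAYBACK_SAMPLE_RATE);
        }
    }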
trunk/src/org/apollo/gui/DualPeakTroughWaveFormRenderer.java
(r353 → r1007)

      private int sampleSize;
+     private int numChannels;
      private boolean isBigEndian;
      private boolean isSigned;
…
          sampleSize = audioFormat.getSampleSizeInBits();
+         numChannels = audioFormat.getChannels();
          isSigned = audioFormat.getEncoding().toString().startsWith("PCM_SIGN");
          isBigEndian = audioFormat.isBigEndian();
…
          if (sampleSize == 16) {

+             int shift_multiplier = numChannels; // <<1=16-bit-mono, <<2=16-bit-stereo
              for (int i = 0; i < aggregationCount; i++) {

                  int max = 0, min = 0, sample; // could use short, but int avoids casting everywhere

-                 int startFrameIndex = (startFrame + (i * aggregationSize)) << 1;
-                 int endFrameIndex = startFrameIndex + (aggregationSize << 1);
+                 int startFrameIndex = (startFrame + (i * aggregationSize)) << shift_multiplier;
+                 int endFrameIndex = startFrameIndex + (aggregationSize << shift_multiplier);

                  for (int k = startFrameIndex; k < endFrameIndex; k += 2) {

+                     // k+=2 works for both mono and stereo:
+                     // in the case of stereo, k+=2 alternates between L and R values.
+                     // The net effect is that it still finds the min and max values across
+                     // all samples: startFrameIndex .. endFrameIndex
+
                      int lsb, msb;
…
          } else if (sampleSize == 8) {

+             int shift_multiplier = numChannels - 1; // <<0=8-bit-mono, <<1=8-bit-stereo
+
+             // The 'i' loop below works for either mono or stereo without any adjustment,
+             // for the same reason given above for the 'k' loop
+
              if (isSigned) {

                  // Find the peak within the block of aggregated frames
                  for (int i = 0; i < amplitudes.length; i++) {

                      byte max = 0, absmax = -1, sample, abssample;

-                     int startFrameIndex = startFrame + (i * aggregationSize);
-                     int endFrameIndex = startFrameIndex + aggregationSize;
+                     int startFrameIndex = (startFrame + (i * aggregationSize)) << shift_multiplier;
+                     int endFrameIndex = startFrameIndex + (aggregationSize << shift_multiplier);

                      for (int k = startFrameIndex; k < endFrameIndex; k++) {
…
                      int max = 0, absmax = -1, sample, abssample; // could use short, but int avoids casting everywhere

-                     int startFrameIndex = startFrame + (i * aggregationSize);
-                     int endFrameIndex = startFrameIndex + aggregationSize;
+                     int startFrameIndex = (startFrame + (i * aggregationSize)) << shift_multiplier;
+                     int endFrameIndex = startFrameIndex + (aggregationSize << shift_multiplier);

                      for (int k = startFrameIndex; k < endFrameIndex; k++) {
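The renderer's real change is the frame-to-byte-offset arithmetic: with 16-bit samples a frame occupies 2 * numChannels bytes, so frame f starts at byte f << numChannels (times 2 for mono, times 4 for stereo); with 8-bit samples it starts at f << (numChannels - 1). For example, frame 100 of 16-bit stereo audio begins at byte 400. A small sketch of that mapping (a hypothetical helper, not in the changeset):

    // Hypothetical helper illustrating the shift_multiplier arithmetic the
    // renderer now uses; covers the same cases: 8/16-bit, mono/stereo PCM.
    final class FrameOffsets {

        /** Byte offset where 'frameIndex' starts, given sample size and channel count. */
        static int frameToByteOffset(int frameIndex, int sampleSizeInBits, int numChannels) {
            int shift = (sampleSizeInBits == 16)
                    ? numChannels       // <<1 = 16-bit mono, <<2 = 16-bit stereo
                    : numChannels - 1;  // <<0 = 8-bit mono,  <<1 = 8-bit stereo
            return frameIndex << shift;
        }

        public static void main(String[] args) {
            // Frame 100 in 16-bit stereo: 100 << 2 = byte 400.
            System.out.println(frameToByteOffset(100, 16, 2)); // prints 400
            // Frame 100 in 8-bit mono: 100 << 0 = byte 100.
            System.out.println(frameToByteOffset(100, 8, 1));  // prints 100
        }
    }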
trunk/src/org/apollo/gui/SampledTrackGraphView.java
(r355 → r1007)

      public SampledTrackGraphView() {

-         // Keep backings consistant with component resize
+         // Keep backings consistent with component resize
          this.addComponentListener(new ComponentListener() {
              public void componentHidden(ComponentEvent e) {
trunk/src/org/apollo/io/AudioIO.java
(r315 → r1007)

  import org.apollo.mvc.SubjectChangedEvent;
  import org.apollo.util.AudioMath;
+
…
       *
       * @throws IOException
-      *     If failed to create file for saving, or an error occured while writing audio bytes.
+      *     If failed to create file for saving, or an error occurred while writing audio bytes.
       *
       * @throws UnsupportedAudioFileException
…
      assert (SampledAudioManager.getInstance().isFormatSupportedForPlayback(sampleFormat));

-     // Initialize the ByteBuffer - and size if possible (not possible for vairable frame size encoding)
+     // Initialize the ByteBuffer - and size if possible (not possible for variable frame size encoding)
      ByteArrayOutputStream loadedBytes = (fformat.getFrameLength() != AudioSystem.NOT_SPECIFIED) ?
          new ByteArrayOutputStream(fformat.getFrameLength() * sampleFormat.getFrameSize()) :
…
      byte[] buffer = new byte[sampleFormat.getFrameSize() * (int)sampleFormat.getFrameRate()];
+
      int bytesRead = 0;
      int totalBytesRead = 0;
…
       */
      public static void main(String[] args) {
-         if (args.length == 1) {
-
-             System.out.println("Testing with " + args[0]);
+
+         String args_zero = null;
+
+         if (args.length == 0) {
+             // Hardwire to whatever is a meaningful example for you!
+             args_zero = "C:\\Temp\\GoodTime_Preview.mp3";
+         }
+         else if (args.length == 1) {
+             args_zero = args[0];
+         }
+
+         System.out.println("Testing with " + args_zero);

          try {
-             LoadedAudioData loaded = loadAudioFile(new File(args[0]), null);
+             LoadedAudioData loaded = loadAudioFile(new File(args_zero), null);
+
              if (loaded != null && loaded.getAudioBytes() != null) {
…
                  if (loaded.wasConverted()) {
                      savePCMAudioToWaveFile(
-                         args[0] + "_pre-converted.wav",
+                         args_zero + "_pre-converted.wav",
                          loaded.getAudioBytes(),
                          loaded.getAudioFormat());
…
                  savePCMAudioToWaveFile(
-                     args[0] + "_converted.wav",
+                     args_zero + "_converted.wav",
                      stdData,
                      lowQualFormat);
…
              return;

-         }
-
-         System.err.println("Must supply 1 argument: the file path to convert");
+         //System.err.println("Must supply 1 argument: the file path to convert");
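With the fallback in place, the test harness runs with either no arguments (using the hardwired example path) or a single file path, loading the file and writing converted WAV copies beside it. A hedged usage sketch follows; it assumes only the signatures visible in this diff (loadAudioFile(File, null) returning a LoadedAudioData, and savePCMAudioToWaveFile(String, byte[], AudioFormat)), that both are static members of AudioIO, and that the caller sits in the same package. The class name and broad throws clause are mine.

    import java.io.File;

    // Hedged usage sketch of the AudioIO test path; names and signatures are
    // taken from the diff above, everything else is an assumption.
    class AudioIOUsageSketch {
        static void convertToWave(String path) throws Exception {
            LoadedAudioData loaded = AudioIO.loadAudioFile(new File(path), null);
            if (loaded != null && loaded.getAudioBytes() != null) {
                AudioIO.savePCMAudioToWaveFile(
                        path + "_converted.wav",   // written next to the input
                        loaded.getAudioBytes(),
                        loaded.getAudioFormat());
            }
        }
    }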
trunk/src/org/apollo/widgets/SampledTrack.java
(r375 → r1007)

      private SampledTrackModel trackModel = null;

-     /** Used for the loading phase. Can change serveal times of a lifetime. for example after import and save it becomes local */
+     /** Used for the loading phase. Can change several times over a lifetime; for example, after import and save it becomes local */
      private String loadFilenameArgument = null; // prefixed with ARG_IMPORT_TAG if importing.

-     private String localFileName; // Immutable - assigned on constuction
+     private String localFileName; // Immutable - assigned on construction

      /** Used for dumping audio to a temp file when deleted - to free memory.
…
          trackModel = new SampledTrackModel(audioBytes, format, localFileName);

-         // Ensure that the model is marked as modiefied so that it will save
+         // Ensure that the model is marked as modified so that it will save
          trackModel.setAudioModifiedFlag(true);