/******************************************************************************
|* THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF
|* ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO
|* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A
|* PARTICULAR PURPOSE.
|*
|* Copyright 1995-2005 Nero AG and its licensors. All Rights Reserved.
|*-----------------------------------------------------------------------------
|* NeroSDK / NeroVisionAPI
|*
|* PROGRAM: ContentProvider.cpp
|*
|* PURPOSE: Test application for the NeroVision API: Sample content provider
******************************************************************************/

#include "stdafx.h"
#include "ContentResolver.h"
// NOTE(review): the two header names below were missing in the original file
// (angle-bracketed names apparently lost). <string> and <math.h> are what the
// code demonstrably needs (std::string, sin()) -- confirm against the SDK sample.
#include <string>
#include <math.h>

using namespace NeroVisionAPI;

// Length of every generated test stream in 100ns units (60 seconds).
#define STREAM_DURATION 600000000

#ifndef PI
#define PI 3.1415926535897932384626433832795
#endif

// Converts the passed string into morse code. Returns a string representation of
// morse code using the characters '.', '-', ' '. For each unknown character a '?'
// is inserted into the output string.
static std::string morse(const char* in)
{
	// Each entry encodes one character of '0'..'9', 'A'..'Z': the low bits hold
	// the dots (0) and dashes (1) in transmission order, terminated by a sentinel
	// 1 bit. Zero entries are the table gap ':'..'@' with no morse code.
	static unsigned char tab[] =
	{
		0x3F, 0x3E, 0x3C, 0x38, 0x30, 0x20, 0x21, 0x23, 0x27, 0x2F, // '0'-'9'
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,                   // ':'-'@'
		0x06, 0x11, 0x15, 0x09, 0x02, 0x14, 0x0B, 0x10, 0x04, 0x1E, // 'A'-'J'
		0x0D, 0x12, 0x07, 0x05, 0x0F, 0x16, 0x1B, 0x0A, 0x08, 0x03, // 'K'-'T'
		0x0C, 0x18, 0x0E, 0x19, 0x1D, 0x13                          // 'U'-'Z'
	};

	std::string out;
	while (*in)
	{
		if (out.size())
			out.append(1, ' '); // separator between encoded characters

		// FIX: cast through unsigned char -- toupper() on a plain (possibly
		// negative) char is undefined behavior for bytes >= 0x80.
		unsigned int ch = toupper((unsigned char)*in);
		if (ch == ' ')
			out.append(" ");
		else if (ch >= '0' && ch <= 'Z')
		{
			unsigned char mc = tab[ch - '0'];
			if (!mc)
			{
				// FIX: ':'..'@' have no code and were silently skipped,
				// contradicting the documented contract; emit '?' instead.
				out.append(1, '?');
			}
			while (mc & 0xFE) // stop when only the sentinel bit remains
			{
				out.append(1, mc & 1 ? '-' : '.');
				mc >>= 1;
			}
		}
		else
			out.append(1, '?');
		in++;
	}
	return out;
}

// Copies the interface pointer into *ppv and adds a reference for the caller.
HRESULT GetInterface(LPUNKNOWN pUnk, void** ppv)
{
	if (!ppv)
		return E_POINTER;
	*ppv = pUnk;
	pUnk->AddRef();
	return S_OK;
}

// Implementation of IAVStreamSample: one media sample holding a malloc'ed data
// buffer together with its start/stop timestamps (100ns units). The sample takes
// ownership of 'data' and releases it with free() on destruction.
class StreamSample: public BaseObj, public IAVStreamSample
{
public:
	IMPL_IUNKNOWN
	IMPL_INTERFACE(IAVStreamSample)

	StreamSample(const LONGLONG& start, const LONGLONG& stop, void* data, DWORD size):
		m_start(start), m_stop(stop), m_data(data), m_size(size) {}
	virtual ~StreamSample() { free(m_data); }

	// Returns the sample's start and stop time.
	STDMETHODIMP raw_GetTime(LONGLONG* start, LONGLONG* stop)
	{
		*start = m_start;
		*stop = m_stop;
		return S_OK;
	}

	// Returns size and pointer of the sample data; the sample keeps ownership.
	STDMETHODIMP raw_GetData(DWORD* size, void** pData)
	{
		*size = m_size;
		*pData = m_data;
		return S_OK;
	}

private:
	LONGLONG m_start, m_stop;
	void* m_data;
	DWORD m_size;
};

// Base implementation of IAVStream, the classes VideoContentStream and
// AudioContentStream are derived from this class.
class BaseContentStream: public BaseObj, public IAVStream
{
public:
	BaseContentStream(const std::wstring& name, const LONGLONG& len):
		m_pos(0), // FIX: m_pos was left uninitialized; a fresh stream starts at 0
		m_len(len), m_name(name) {}

	IMPL_IUNKNOWN
	IMPL_INTERFACE(IAVStream)

	STDMETHODIMP raw_SetPosition(LONGLONG* pos) { m_pos = *pos; return S_OK; }
	STDMETHODIMP raw_GetSample(IAVStreamSample** sample);

protected:
	// Implemented by derived classes to produce one sample's payload. 'pos' may
	// be snapped back to a sample boundary by the implementation; the returned
	// buffer must be allocated with malloc() (freed later by StreamSample).
	virtual void* CreateSample(LONGLONG& pos, DWORD& size, LONGLONG& duration) = 0;

	LONGLONG m_pos;            // current stream position, 100ns units
	LONGLONG m_len;            // total stream length, 100ns units
	const std::wstring m_name; // stream name, rendered into the content
};

// Class implementing a video stream; generated video frames will contain the
// stream name and the frame number.
class VideoContentStream: public BaseContentStream
{
public:
	VideoContentStream(const std::wstring& name, const VideoInfo& vi, const LONGLONG& len);
	virtual ~VideoContentStream();

private:
	void* CreateSample(LONGLONG& pos, DWORD& size, LONGLONG& duration);

	VideoInfo m_videoInfo;
	HDC m_dc;                   // memory DC used for GDI text drawing
	HFONT m_font, m_oldFont;
	HBITMAP m_bmp, m_oldBmp;    // DIB section the frames are rendered into
	int m_imgSize;              // size of one frame in bytes (DWORD-aligned rows)
	void* m_pixels;             // pixel bits of the DIB section
};

// Class implementing an audio stream; the generated audio stream will contain
// the stream name in morse code.
class AudioContentStream: public BaseContentStream
{
public:
	AudioContentStream(const std::wstring& name, const AudioInfo& ai, const LONGLONG& len);
	virtual ~AudioContentStream();

private:
	void* CreateSample(LONGLONG& pos, DWORD& size, LONGLONG& duration);
	int CreateBeeps(const char* code, void* src, void* dst);
	void AddPause(int dits, unsigned char*& dst, int& len);
	void AddBeep(int dits, unsigned char*& dst, const void* src, int& len);

	AudioInfo m_audioInfo;
	int m_ditLen;            // length of one morse "dit" in bytes
	unsigned char* m_sound;  // pre-rendered morse audio, m_seconds long
	int m_seconds;           // length of m_sound in whole seconds
};

// Provider for a video stream.
class VideoProvider: public BaseObj, public IVideoStreamProvider
{
public:
	VideoProvider(const std::wstring& name): m_name(name) {}

	IMPL_IUNKNOWN

	// FIX: the static_cast template arguments were missing in the original
	// (angle brackets lost); each interface is cast explicitly so the correct
	// vtable pointer is handed out for every requested IID.
	HRESULT InternalQueryInterface(REFIID riid, void** ppv)
	{
		if (riid == __uuidof(IContentProvider))
			return GetInterface(static_cast<IContentProvider*>(this), ppv);
		else if (riid == __uuidof(IAVStreamProvider))
			return GetInterface(static_cast<IAVStreamProvider*>(this), ppv);
		else if (riid == __uuidof(IVideoStreamProvider))
			return GetInterface(static_cast<IVideoStreamProvider*>(this), ppv);
		else
			return BaseObj::InternalQueryInterface(riid, ppv);
	}

	STDMETHODIMP raw_GetInfo(VideoInfo* info);
	STDMETHODIMP raw_GetStream(IAVStream** stream);
	STDMETHODIMP get_ProvidedContentType(ContentType* pCT) { *pCT = VideoStream; return S_OK; }
	STDMETHODIMP raw_GetDuration(hyper* dur) { *dur = STREAM_DURATION; return S_OK; }

private:
	const std::wstring m_name;
};

// Provider for an audio stream.
class AudioProvider: public BaseObj, public IAudioStreamProvider
{
public:
	AudioProvider(const std::wstring& name): m_name(name) {}

	IMPL_IUNKNOWN

	// FIX: static_cast template arguments restored (see VideoProvider above).
	HRESULT InternalQueryInterface(REFIID riid, void** ppv)
	{
		if (riid == __uuidof(IContentProvider))
			return GetInterface(static_cast<IContentProvider*>(this), ppv);
		else if (riid == __uuidof(IAVStreamProvider))
			return GetInterface(static_cast<IAVStreamProvider*>(this), ppv);
		else if (riid == __uuidof(IAudioStreamProvider))
			return GetInterface(static_cast<IAudioStreamProvider*>(this), ppv);
		else
			return BaseObj::InternalQueryInterface(riid, ppv);
	}

	STDMETHODIMP raw_GetInfo(AudioInfo* info);
	STDMETHODIMP raw_GetStream(IAVStream** stream);
	STDMETHODIMP get_ProvidedContentType(ContentType* pCT) { *pCT = AudioStream; return S_OK; }
	STDMETHODIMP raw_GetDuration(hyper* dur) { *dur = STREAM_DURATION; return S_OK; }

private:
	const std::wstring m_name;
};

// ID resolving of content provider: for this test application any ID is valid.
// In case the ID begins with "A:", the method returns an audio provider, all
// other IDs resolve to a video provider, empty IDs cause an error.
HRESULT ContentResolver::raw_ResolveContent(BSTR id, IContentProvider** ppCP)
{
	if (!ppCP)
		return E_POINTER; // FIX: guard against a null output pointer

	std::wstring str(id);
	if (str.length())
	{
		if (str.substr(0, 2).compare(L"A:") == 0)
		{
			*ppCP = new AudioProvider(str.substr(2));
			return S_OK;
		}
		else if (str.substr(0, 2).compare(L"V:") == 0)
		{
			*ppCP = new VideoProvider(str.substr(2));
			return S_OK;
		}
		else
		{
			*ppCP = new VideoProvider(str);
			return S_OK;
		}
	}
	return E_INVALIDARG;
}

// Provided test stream has the following properties: RGB32, 25 fps, aspect 4:3,
// progressive, video size defaults to 480x360 but can be changed for testing
// purpose by specifying the desired size at the beginning of the stream name.
HRESULT VideoProvider::raw_GetInfo(VideoInfo* info)
{
	// if possible take width and height from stream name (e.g. "640 x 480 ...")
	if (swscanf(m_name.c_str(), L"%d x %d", &info->viWidth, &info->viHeight) == 2)
	{
		info->viWidth = max(1, info->viWidth);
		info->viHeight = max(1, info->viHeight);
	}
	else
	{
		info->viWidth = 480;
		info->viHeight = 360;
	}
	info->viFormat = VideoFormat_RGB32;
	info->viStructure = FrameStructure_Progressive;
	info->viTimePerFrame = 400000; // 25 fps in 100ns units
	info->viAspectX = 4;
	info->viAspectY = 3;
	return S_OK;
}

// Provided test stream is 44.1kHz, 16 bit, mono.
HRESULT AudioProvider::raw_GetInfo(AudioInfo* info)
{
	info->aiFormat = AudioFormat_PCM;
	info->aiSamplesPerSec = 44100;
	info->aiBitsPerSample = 16;
	info->aiNumChannels = 1;
	return S_OK;
}

// Creates a new video stream object configured with this provider's format.
HRESULT VideoProvider::raw_GetStream(IAVStream** stream)
{
	VideoInfo vi;
	LONGLONG d;
	raw_GetInfo(&vi);
	raw_GetDuration(&d);
	*stream = new VideoContentStream(m_name, vi, d);
	return S_OK;
}

// Creates a new audio stream object configured with this provider's format.
HRESULT AudioProvider::raw_GetStream(IAVStream** stream)
{
	AudioInfo ai;
	LONGLONG d;
	raw_GetInfo(&ai);
	raw_GetDuration(&d);
	*stream = new AudioContentStream(m_name, ai, d);
	return S_OK;
}

// Creates a StreamSample object for each sample in the stream,
// S_FALSE is returned to signal "end of stream".
HRESULT BaseContentStream::raw_GetSample(IAVStreamSample** sample)
{
	if (m_pos >= m_len)
		return S_FALSE;

	// base class uses CreateSample to generate the actual sample,
	// this method is implemented by derived classes
	DWORD size;
	LONGLONG dur;
	void* data = CreateSample(m_pos, size, dur);
	*sample = new StreamSample(m_pos, m_pos + dur, data, size);
	m_pos += dur;
	return S_OK;
}

VideoContentStream::VideoContentStream(const std::wstring& name, const VideoInfo& vi,
                                       const LONGLONG& len):
	BaseContentStream(name, len), m_videoInfo(vi)
{
	// this class uses Windows GDI for drawing text into video frames,
	// initialize device context, font and bitmap
	m_dc = CreateCompatibleDC(0);

	int bpp = m_videoInfo.viFormat == VideoFormat_RGB24 ? 24 : 32;
	// one frame's size: rows padded to DWORD boundaries as required for DIBs
	m_imgSize = m_videoInfo.viHeight * ((m_videoInfo.viWidth * bpp / 8 + 3) & ~3);

	LOGFONT lf;
	lf.lfHeight = m_videoInfo.viHeight / 16; // small font for the stream name
	lf.lfWidth = 0;
	lf.lfEscapement = 0;
	lf.lfOrientation = 0;
	lf.lfWeight = FW_DONTCARE;
	lf.lfItalic = 0;
	lf.lfUnderline = 0;
	lf.lfStrikeOut = 0;
	lf.lfCharSet = DEFAULT_CHARSET;
	lf.lfOutPrecision = OUT_DEFAULT_PRECIS;
	lf.lfClipPrecision = CLIP_DEFAULT_PRECIS;
	lf.lfQuality = ANTIALIASED_QUALITY;
	lf.lfPitchAndFamily = DEFAULT_PITCH | FF_DONTCARE;
	_tcscpy(lf.lfFaceName, TEXT("Arial"));
	HFONT smallFont = CreateFontIndirect(&lf);
	m_oldFont = (HFONT)SelectObject(m_dc, smallFont);

	// create the DIB section all frames are rendered into
	BITMAPINFO bi = {0};
	bi.bmiHeader.biBitCount = bpp;
	bi.bmiHeader.biCompression = BI_RGB;
	bi.bmiHeader.biWidth = m_videoInfo.viWidth;
	bi.bmiHeader.biHeight = m_videoInfo.viHeight;
	bi.bmiHeader.biPlanes = 1;
	bi.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
	bi.bmiHeader.biSizeImage = m_imgSize;
	m_bmp = CreateDIBSection(m_dc, &bi, DIB_RGB_COLORS, &m_pixels, 0, 0);
	m_oldBmp = (HBITMAP)SelectObject(m_dc, m_bmp);

	SetBkColor(m_dc, RGB(0, 0, 0));
	SetTextColor(m_dc, RGB(210, 210, 0));

	// draw the stream name once into the lower part of the frame;
	// it stays there because only the upper 14/16 is cleared per frame
	RECT rect = {0, m_videoInfo.viHeight * 14 / 16, m_videoInfo.viWidth, m_videoInfo.viHeight};
	DrawText(m_dc, _bstr_t(m_name.c_str()), -1, &rect, DT_CENTER | DT_TOP);

	// switch to the big font used for the frame counter
	lf.lfHeight = m_videoInfo.viHeight / 2;
	m_font = CreateFontIndirect(&lf);
	SelectObject(m_dc, m_font);
	DeleteObject(smallFont);
}

VideoContentStream::~VideoContentStream()
{
	// restore the DC's original objects before deleting ours
	SelectObject(m_dc, m_oldFont);
	SelectObject(m_dc, m_oldBmp);
	DeleteObject(m_font);
	DeleteObject(m_bmp);
	DeleteDC(m_dc);
}

// Renders one video frame: clears the counter area, draws the frame number and
// copies the DIB pixels into a freshly malloc'ed buffer owned by the caller.
void* VideoContentStream::CreateSample(LONGLONG& pos, DWORD& size, LONGLONG& duration)
{
	void* p = malloc(m_imgSize);
	size = m_imgSize;
	// FIX: use the advertised frame duration instead of repeating the constant
	duration = m_videoInfo.viTimePerFrame;

	// get integer frame number and snap pos to the frame boundary
	int n = int(pos / duration);
	pos = n * duration;

	// clear upper image part (stream name at the bottom stays) and draw number
	BitBlt(m_dc, 0, 0, m_videoInfo.viWidth, m_videoInfo.viHeight * 14 / 16, m_dc, 0, 0, BLACKNESS);
	TCHAR txt[16]; // counter is at most 4 digits, extra room for safety
	_stprintf(txt, _T("%d"), n % 10000);
	RECT rect = {0, 0, m_videoInfo.viWidth, m_videoInfo.viHeight};
	DrawText(m_dc, txt, -1, &rect, DT_CENTER | DT_VCENTER | DT_SINGLELINE);

	// copy into output buffer
	memcpy(p, m_pixels, m_imgSize);
	return p;
}

AudioContentStream::AudioContentStream(const std::wstring& name, const AudioInfo& ai,
                                       const LONGLONG& len):
	BaseContentStream(name, len), m_audioInfo(ai)
{
	const double freq = 2000; // beep frequency in Hz
	const int bytePerSec = m_audioInfo.aiSamplesPerSec * m_audioInfo.aiNumChannels *
	                       m_audioInfo.aiBitsPerSample / 8;
	int ditSamples = int(0.06 * m_audioInfo.aiSamplesPerSec); // one "dit" = 60ms
	m_ditLen = m_audioInfo.aiNumChannels * m_audioInfo.aiBitsPerSample / 8 * ditSamples;

	// generate sine wave to use as beep for morse code (3 dits = one dash);
	// NOTE(review): the short* fill assumes 16-bit samples, which matches the
	// format delivered by AudioProvider::raw_GetInfo
	void* sine = malloc(3 * m_ditLen);
	short* ptr = (short*)sine;
	for (int sample = 0; sample < 3 * ditSamples; ++sample)
	{
		int value = 24000 * sin(freq * 2 * PI * sample / m_audioInfo.aiSamplesPerSec);
		for (int ch = 0; ch < m_audioInfo.aiNumChannels; ++ch)
		{
			*ptr++ = value;
		}
	}

	// convert stream name to morse code string
	std::string s = morse(_bstr_t(m_name.c_str()));

	// generate audio from morse code string,
	// call CreateBeeps once to determine required buffer size
	int l = CreateBeeps(s.c_str(), 0, 0);
	// add 5 sec pause at end and round to whole seconds
	m_seconds = int(l / double(bytePerSec) + 5.5);
	l = m_seconds * bytePerSec;
	m_sound = (unsigned char*)malloc(l);
	memset(m_sound, 0, l);

	// audio signal is generated once and saved in a buffer,
	// later each stream sample will get its audio from this buffer
	CreateBeeps(s.c_str(), sine, m_sound);
	free(sine);
}

// Appends 'dits' dit-lengths of silence; only counts bytes when dst is null.
void AudioContentStream::AddPause(int dits, unsigned char*& dst, int& len)
{
	int l = dits * m_ditLen;
	if (dst)
	{
		memset(dst, 0, l);
		dst += l;
	}
	len += l;
}

// Appends 'dits' dit-lengths of beep from src; only counts bytes when dst is null.
void AudioContentStream::AddBeep(int dits, unsigned char*& dst, const void* src, int& len)
{
	int l = dits * m_ditLen;
	if (dst)
	{
		memcpy(dst, src, l);
		dst += l;
	}
	len += l;
}

// Convert morse code string into an audio signal. Returns the number of bytes
// produced; pass src = dst = 0 to only measure the required buffer size.
int AudioContentStream::CreateBeeps(const char* code, void* src, void* dst)
{
	int len = 0;
	unsigned char* b = (unsigned char*)dst;
	while (*code)
	{
		switch (*code)
		{
		case '.': // dot: 1 dit beep + 1 dit pause
			AddBeep(1, b, src, len);
			AddPause(1, b, len);
			break;
		case '-': // dash: 3 dit beep + 1 dit pause
			AddBeep(3, b, src, len);
			AddPause(1, b, len);
			break;
		default:  // separator / unknown: 6 dit pause
			AddPause(6, b, len);
			break;
		}
		++code;
	}
	return len;
}

AudioContentStream::~AudioContentStream()
{
	free(m_sound);
}

// Delivers 1/4 second of audio per sample, copied from the pre-rendered buffer.
void* AudioContentStream::CreateSample(LONGLONG& pos, DWORD& size, LONGLONG& duration)
{
	// adjust pos to an integer multiple of quarter seconds (2500000 * 100ns)
	const int bytePerSec = m_audioInfo.aiSamplesPerSec * m_audioInfo.aiNumChannels *
	                       m_audioInfo.aiBitsPerSample / 8;
	int q = int(pos / 2500000);
	pos = LONGLONG(q) * 2500000;
	size = bytePerSec / 4;
	void* p = malloc(size);
	duration = 2500000;

	// copy the required audio part from our buffer into the sample
	// (audio restarts from beginning if stream is longer than morse code buffer)
	memcpy(p, m_sound + (q % (4 * m_seconds)) * size, size);
	return p;
}