ffaudio-core.cc
/*
 * Audacious FFaudio Plugin
 * Copyright © 2009 William Pitcock <nenolod@dereferenced.org>
 *             Matti Hämäläinen <ccr@tnsp.org>
 * Copyright © 2011 John Lindgren <john.lindgren@tds.net>
 * Video-playing capability added Copyright © 2015 Jim Turner <turnerjw784@yahoo.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions, and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions, and the following disclaimer in the documentation
 *    provided with the distribution.
 *
 * This software is provided "as is" and without any warranty, express or
 * implied. In no event shall the authors be liable for any damages arising from
 * the use of this software.
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#include <pthread.h>

#undef FFAUDIO_DOUBLECHECK  /* Doublecheck probing result for debugging purposes */
#undef FFAUDIO_NO_BLACKLIST /* Don't blacklist any recognized codecs/formats */

#include "ffaudio-stdinc.h"

#include <audacious/audtag.h>
#include <libaudcore/audstrings.h>
#include <libaudcore/i18n.h>
#include <libaudcore/multihash.h>
#include <libaudcore/runtime.h>

#include <stdint.h>
#include "libavutil/common.h"
#include "libavutil/dict.h"
#include "libavutil/log.h"
#include "libavformat/version.h"
#include "libavformat/avio.h"
extern "C" {
#include <libswscale/swscale.h>
}
#include <SDL.h>
#include <SDL_thread.h>

#define SDL_AUDIO_BUFFER_SIZE 4096
#define MAX_AUDIO_FRAME_SIZE 192000


class FFaudio : public InputPlugin
{
public:
    static const char about[];
    static const char * const exts[], * const mimes[];

    static constexpr PluginInfo info = {
        N_("FFmpeg Plugin"),
        PACKAGE,
        about
    };

    static constexpr auto iinfo = InputInfo (FlagWritesTag)
        .with_priority (10)  /* lowest priority fallback */
        .with_exts (exts)
        .with_mimes (mimes);

    constexpr FFaudio () : InputPlugin (info, iinfo) {}

    bool init ();
    void cleanup ();

    bool is_our_file (const char * filename, VFSFile & file);
    Tuple read_tuple (const char * filename, VFSFile & file);
    Index<char> read_image (const char * filename, VFSFile & file);
    bool write_tuple (const char * filename, VFSFile & file, const Tuple & tuple);
    bool play (const char * filename, VFSFile & file);
};

EXPORT FFaudio aud_plugin_instance;

static bool play_video;       /* JWT: TRUE IF USER IS CURRENTLY PLAYING VIDEO (KILLING VID. WINDOW TURNS OFF!) */
static bool play_video_wason; /* JWT: TRUE IF USER WANTS VIDEO PART OF VIDEO FILES DISPLAYED */
static bool fromstdin;        /* JWT: TRUE IF INPUT IS STREAMED IN VIA STDIN, FALSE OTHERWISE */

typedef struct
{
    int stream_idx;
    AVStream * stream;
    AVCodecContext * context;
    AVCodec * codec;
}
CodecInfo;
static CodecInfo cinfo;   /* JWT: SAVE THE AUDIO & VIDEO CODEC STRUCTURES SINCE NOW REFERENCED IN 2 FUNCTIONS: */
static CodecInfo vcinfo;

/*
    JWT: ADDED ALL THIS QUEUE STUFF TO SMOOTH VIDEO PERFORMANCE SO THAT VIDEO FRAMES WOULD
    BE OUTPUT MORE INTERLACED WITH THE AUDIO FRAMES BY QUEUEING VIDEO FRAMES UNTIL AN
    AUDIO FRAME IS PROCESSED, THEN DEQUEUEING AND PROCESSING 'EM WITH EACH AUDIO FRAME.
    THE SIZE OF THIS QUEUE IS SET BY THE video_qsize CONFIG PARAMETER AND DEFAULTS TO 16.
    HAVING TOO MANY CAN RESULT IN DELAYED VIDEO, SO EXPERIMENT.  IDEALLY, PACKETS SHOULD
    BE PROCESSED:  V A V A V A..., BUT THIS HANDLES:
    V1 V2 V3 V4 V5 V6 A1 A2 A3 A4 A5 A6 A7 V7 A8...  AS:
    (Q:V1 V2 V3 V4 V5 V6) A1 A2 V1 A3 A4 V2 A5 A6 V3 A7 A8...
    WE DON'T WANT TO INTERRUPT AUDIO PERFORMANCE AND I DON'T KNOW HOW TO THREAD IT UP,
    BUT THIS SIMPLE APPROACH SEEMS TO WORK PRETTY SMOOTHLY FOR ME!  OTHERWISE, TRY
    INCREASING video_qsize IN THE config FILE.
    BORROWED THESE FUNCTIONS FROM:
    http://www.thelearningpoint.net/computer-science/data-structures-queues--with-c-program-source-code
*/

typedef struct
{
    int capacity;
    int size;
    int front;
    int rear;
    AVPacket *elements;
}
pktQueue;

pktQueue * createQueue (int maxElements)
{
    /* Create a Queue */
    pktQueue *Q;
    Q = (pktQueue *)malloc(sizeof(pktQueue));
    /* Initialise its properties */
    Q->elements = (AVPacket *)malloc(sizeof(AVPacket)*maxElements);
    Q->size = 0;
    Q->capacity = maxElements;
    Q->front = 0;
    Q->rear = -1;
    /* Return the pointer */
    return Q;
}

bool Dequeue (pktQueue *Q)
{
    /* If Queue size is zero then it is empty. So we cannot pop */
    if(Q->size==0)
        return false;
    /* Removing an element is equivalent to incrementing index of front by one */
    else
    {
        Q->size--;
        if (Q->elements[Q->front].data)
            av_free_packet(&Q->elements[Q->front]);

        Q->front++;
        /* As we fill elements in circular fashion */
        if(Q->front==Q->capacity)
        {
            Q->front=0;
        }
    }
    return true;
}

AVPacket * QFront (pktQueue *Q)
{
    if(Q->size==0)
    {
        AUDDBG("Queue is Empty\n");
        return nullptr;
    }
    /* Return the element which is at the front */
    return &Q->elements[Q->front];
}

bool isQueueFull (pktQueue *Q)
{
    return (Q->size == Q->capacity) ? true : false;
}

bool Enqueue (pktQueue *Q, AVPacket element)
{
    /* If the Queue is full, we cannot push an element into it as there is no space for it. */
    if(Q->size == Q->capacity)
    {
        printf("Queue is Full\n");
        return false;
    }
    else
    {
        Q->size++;
        Q->rear = Q->rear + 1;
        /* As we fill the queue in circular fashion */
        if(Q->rear == Q->capacity)
        {
            Q->rear = 0;
        }
        /* Insert the element in its rear side */
        Q->elements[Q->rear] = element;
    }
    return true;
}

/* JWT:END OF ADDED VIDEO PACKET QUEUEING FUNCTIONS */

static SimpleHash<String, AVInputFormat *> extension_dict;

static void create_extension_dict ();

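/* FFmpeg lock-manager callback: create, lock, unlock, or destroy a pthread
   mutex on request so libavcodec/libavformat can be used from multiple threads. */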
static int lockmgr (void * * mutexp, enum AVLockOp op)
{
    switch (op)
    {
    case AV_LOCK_CREATE:
        * mutexp = new pthread_mutex_t;
        pthread_mutex_init ((pthread_mutex_t *) * mutexp, nullptr);
        break;
    case AV_LOCK_OBTAIN:
        pthread_mutex_lock ((pthread_mutex_t *) * mutexp);
        break;
    case AV_LOCK_RELEASE:
        pthread_mutex_unlock ((pthread_mutex_t *) * mutexp);
        break;
    case AV_LOCK_DESTROY:
        pthread_mutex_destroy ((pthread_mutex_t *) * mutexp);
        delete (pthread_mutex_t *) * mutexp;
        break;
    }

    return 0;
}

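/* Forward FFmpeg log messages to the Audacious log, mapping libav log levels
   onto the closest audlog severity. */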
static void ffaudio_log_cb (void * avcl, int av_level, const char * fmt, va_list va)
{
    audlog::Level level = audlog::Debug;
    char message [2048];

    switch (av_level)
    {
    case AV_LOG_QUIET:
        return;
    case AV_LOG_PANIC:
    case AV_LOG_FATAL:
    case AV_LOG_ERROR:
        level = audlog::Error;
        break;
    case AV_LOG_WARNING:
        level = audlog::Warning;
        break;
    case AV_LOG_INFO:
        level = audlog::Info;
        break;
    default:
        break;
    }

    AVClass * avc = avcl ? * (AVClass * *) avcl : nullptr;

    vsnprintf (message, sizeof message, fmt, va);

    audlog::log (level, __FILE__, __LINE__, avc ? avc->item_name(avcl) : __FUNCTION__,
     "<%p> %s", avcl, message);
}

bool FFaudio::init ()
{
    av_register_all();
    av_lockmgr_register (lockmgr);

    create_extension_dict ();

    av_log_set_callback (ffaudio_log_cb);
    /* JWT:SAVE config OPTION VALUE FOR CLOSING THINGS, IN CASE USER KILLS VIDEO WINDOW
       THUS TURNING "play_video" OFF DURING PLAY.
    */
    play_video_wason = aud_get_bool ("ffaudio", "play_video");

    return true;
}

void FFaudio::cleanup ()
{
    extension_dict.clear ();

    av_lockmgr_register (nullptr);
}

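/* Turn an FFmpeg error code into a printable message (static buffer, so not thread-safe). */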
static const char * ffaudio_strerror (int error)
{
    static char buf[256];
    return (! av_strerror (error, buf, sizeof buf)) ? buf : "unknown error";
}

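/* Build a dictionary mapping each lower-cased file extension to the
   AVInputFormat that registers it, for quick lookup by filename. */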
static void create_extension_dict ()
{
    AVInputFormat * f;
    for (f = av_iformat_next (nullptr); f; f = av_iformat_next (f))
    {
        if (! f->extensions)
            continue;

        StringBuf exts = str_tolower (f->extensions);
        Index<String> extlist = str_list_to_index (exts, ",");

        for (auto & ext : extlist)
            extension_dict.add (ext, std::move (f));
    }
}

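/* Look up an AVInputFormat from the URI's file extension, if it has one. */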
static AVInputFormat * get_format_by_extension (const char * name)
{
    StringBuf ext = uri_get_extension (name);
    if (! ext)
        return nullptr;

    AUDDBG ("Get format by extension: %s\n", name);
    AVInputFormat * * f = extension_dict.lookup (String (str_tolower (ext)));

    if (f && * f)
        AUDDBG ("Format %s.\n", (* f)->name);
    else
        AUDDBG ("Format unknown.\n");

    return f ? * f : nullptr;
}

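/* Probe the stream contents to identify the container format: read progressively
   larger chunks (16 bytes up to 16 KB) and relax the required probe score before
   giving up, then rewind the file for the real open. */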
static AVInputFormat * get_format_by_content (const char * name, VFSFile & file)
{
    AUDDBG ("Get format by content: %s\n", name);

    AVInputFormat * f = nullptr;

    unsigned char buf[16384 + AVPROBE_PADDING_SIZE];
    int size = 16;
    int filled = 0;
    int target = 100;
    int score = 0;

    while (1)
    {
        if (filled < size)
            filled += file.fread (buf + filled, 1, size - filled);

        memset (buf + filled, 0, AVPROBE_PADDING_SIZE);
        AVProbeData d = {name, buf, filled};
        score = target;

        f = av_probe_input_format2 (& d, true, & score);
        if (f)
            break;

        if (size < 16384 && filled == size)
            size *= 4;
        else if (target > 10)
            target = 10;
        else
            break;
    }

    if (f)
        AUDDBG ("Format %s, buffer size %d, score %d.\n", f->name, filled, score);
    else
        AUDDBG ("Format unknown.\n");

    if (file.fseek (0, VFS_SEEK_SET) < 0)
        ; /* ignore errors here */

    return f;
}

static AVInputFormat * get_format (const char * name, VFSFile & file)
{
    AVInputFormat * f = get_format_by_extension (name);
    return f ? f : get_format_by_content (name, file);
}

bool isFromStdin ()  /* JWT: TRUE IF INPUT IS STREAMED IN VIA STDIN, FALSE OTHERWISE */
{
    return fromstdin;
}

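/* Open the input with FFmpeg; when reading from stdin the URI is replaced with
   "pipe:" so libavformat reads the piped stream directly. */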
static AVFormatContext * open_input_file (const char * name, VFSFile & file)
{
    AVFormatContext * c = NULL;

    play_video = aud_get_bool ("ffaudio", "play_video");  //JWT:RESET PLAY-VIDEO, CASE TURNED OFF ON PREV. PLAY
    const char * xname = name;
    if (fromstdin)
        xname = "pipe:";
    int ret = avformat_open_input (&c, xname, NULL, NULL);
    if (ret)
    {
        AUDERR ("avformat_open_input failed for %s: %s.\n", xname, ffaudio_strerror (ret));
        return nullptr;
    }

    return c;
}

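/* Close the audio (and, if video was being played, the video) codec contexts,
   then close the format context itself. */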
static void close_input_file (AVFormatContext * c, CodecInfo * cinfo, CodecInfo * vcinfo)
{
    if (play_video_wason)
        avcodec_close(vcinfo->context);
    play_video_wason = play_video;
    avcodec_close(cinfo->context);
#if CHECK_LIBAVFORMAT_VERSION (53, 25, 0, 53, 17, 0)
    avformat_close_input (&c);
#else
    av_close_input_file (c);
#endif
}

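/* Scan the container's streams, pick the first audio stream (required) and,
   when video playback is enabled, the video stream, and fill in the CodecInfo
   structures with the matching decoders. */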
static bool find_codec (AVFormatContext * c, CodecInfo * cinfo, CodecInfo * vcinfo)
{
    avformat_find_stream_info (c, NULL);

    int videoStream=-1;
    int audioStream=-1;
    for (unsigned i = 0; i < c->nb_streams; i++)
    {
        if (c->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
            videoStream=i;
        else if (c->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO && audioStream < 0)
            audioStream=i;
    }
    if (audioStream==-1)  /* PUNT IF NO AUDIO SINCE AUDACIOUS IS AN *AUDIO* PLAYER! */
        return false;

    AVCodec * codec = avcodec_find_decoder (c->streams[audioStream]->codec->codec_id);
    if (codec)
    {
        cinfo->stream_idx = audioStream;
        cinfo->stream = c->streams[audioStream];
        cinfo->context = c->streams[audioStream]->codec;
        cinfo->codec = codec;

        /* JWT: NOW IF USER WANTS VIDEO, SEE IF WE GOT A VIDEO STREAM TOO: */
        if (play_video && videoStream >= 0)
        {
            AVCodec * vcodec = avcodec_find_decoder (c->streams[videoStream]->codec->codec_id);
            if (vcodec)
            {
                vcinfo->stream_idx = videoStream;
                vcinfo->stream = c->streams[videoStream];
                vcinfo->context = c->streams[videoStream]->codec;
                vcinfo->codec = vcodec;
            }
        }
        else
            play_video = false;  /* turn off video playback, since we could not find stream! */

        return true;
    }

    return false;
}

bool FFaudio::is_our_file (const char * filename, VFSFile & file)
{
    return (bool) get_format (filename, file);
}

static const struct {
    Tuple::ValueType ttype;  /* Tuple field value type */
    Tuple::Field field;      /* Tuple field constant */
    const char * keys[5];    /* Keys to match (case-insensitive), ended by nullptr */
} metaentries[] = {
    {Tuple::String, Tuple::Artist, {"author", "hor", "artist", nullptr}},
    {Tuple::String, Tuple::Title, {"title", "le", nullptr}},
    {Tuple::String, Tuple::Album, {"album", "WM/AlbumTitle", nullptr}},
    {Tuple::String, Tuple::Performer, {"performer", nullptr}},
    {Tuple::String, Tuple::Copyright, {"copyright", nullptr}},
    {Tuple::String, Tuple::Genre, {"genre", "WM/Genre", nullptr}},
    {Tuple::String, Tuple::Comment, {"comment", nullptr}},
    {Tuple::String, Tuple::Composer, {"composer", nullptr}},
    {Tuple::Int, Tuple::Year, {"year", "WM/Year", "date", nullptr}},
    {Tuple::Int, Tuple::Track, {"track", "WM/TrackNumber", nullptr}},
};

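/* Copy any recognized keys from an FFmpeg metadata dictionary into the tuple,
   using the first matching key for each field. */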
static void read_metadata_dict (Tuple & tuple, AVDictionary * dict)
{
    for (auto & meta : metaentries)
    {
        AVDictionaryEntry * entry = nullptr;

        for (int j = 0; ! entry && meta.keys[j]; j ++)
            entry = av_dict_get (dict, meta.keys[j], nullptr, 0);

        if (entry && entry->value)
        {
            if (meta.ttype == Tuple::String)
                tuple.set_str (meta.field, entry->value);
            else if (meta.ttype == Tuple::Int)
                tuple.set_int (meta.field, atoi (entry->value));
        }
    }
}

Tuple FFaudio::read_tuple (const char * filename, VFSFile & file)
{
    Tuple tuple;
    fromstdin = (!strcmp(filename, "-") || strstr(filename, "://-.")) ? true : false;
    AUDDBG("Filename =%s=\n", filename);
    if (fromstdin)
    {
        tuple.set_filename (filename);
    }
    else  /* JWT:THIS STUFF DEFERRED UNTIL PLAY FOR STDIN, BUT SEEMS TO HAVE TO BE HERE FOR DIRECT */
    {
        AVFormatContext * ic = open_input_file (filename, file);

        if (ic)
        {
            if (find_codec (ic, & cinfo, & vcinfo))
            {
                tuple.set_filename (filename);

                tuple.set_int (Tuple::Length, ic->duration / 1000);
                tuple.set_int (Tuple::Bitrate, ic->bit_rate / 1000);
                if (cinfo.codec->long_name)
                    tuple.set_str (Tuple::Codec, cinfo.codec->long_name);
                if (ic->metadata)
                    read_metadata_dict (tuple, ic->metadata);
                if (cinfo.stream->metadata)
                    read_metadata_dict (tuple, cinfo.stream->metadata);
                if (play_video && vcinfo.stream->metadata)
                    read_metadata_dict (tuple, vcinfo.stream->metadata);
            }

            close_input_file (ic, & cinfo, & vcinfo);
        }

        if (tuple && ! file.fseek (0, VFS_SEEK_SET))
            audtag::tuple_read (tuple, file);
        /* JWT:BUILD NOTE: IF USING LATEST AUDACIOUS VERSIONS YOU MAY NEED TO REPLACE ABOVE LINE WITH:
               audtag::read_tag (file, & tuple, nullptr);
        */
    }

    return tuple;
}

bool FFaudio::write_tuple (const char * filename, VFSFile & file, const Tuple & tuple)
{
    if (str_has_suffix_nocase (filename, ".ape"))
        return audtag::tuple_write (tuple, file, audtag::TagType::APE);

    return audtag::tuple_write (tuple, file, audtag::TagType::None);
}

Index<char> FFaudio::read_image (const char * filename, VFSFile & file)
{
    if (str_has_suffix_nocase (filename, ".m4a") || str_has_suffix_nocase (filename, ".mp4"))
        return read_itunes_cover (filename, file);

    return Index<char> ();
}

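/* Returns true while the decoder still wants more calls for the current packet;
   returns false once a frame has been decoded and displayed (or on a decode
   error), so callers loop on it until it returns false. */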
/* JWT: NEW FUNCTION TO WRITE VIDEO FRAMES TO THE POPUP WINDOW: */
bool write_videoframe (SwsContext * sws_ctx, AVFrame * vframe, SDL_Overlay * bmp,
        AVPacket *pkt, int * frameFinished, int video_width, int video_height)
{
    SDL_Rect rect;
    int len = avcodec_decode_video2 (vcinfo.context, vframe, frameFinished, pkt);
    // Did we get a video frame?
    if (len < 0)
    {
        AUDERR ("decode_video() failed, code %d\n", len);
        return false;
    }
    if (*frameFinished)
    {
        SDL_LockYUVOverlay(bmp);
        AVPicture pict;
        pict.data[0] = bmp->pixels[0];
        pict.data[1] = bmp->pixels[2];
        pict.data[2] = bmp->pixels[1];
        pict.linesize[0] = bmp->pitches[0];
        pict.linesize[1] = bmp->pitches[2];
        pict.linesize[2] = bmp->pitches[1];
        // Convert the image into YUV format that SDL uses
        sws_scale(sws_ctx, (uint8_t const * const *)vframe->data,
                vframe->linesize, 0, vcinfo.context->height,
                pict.data, pict.linesize);
        SDL_UnlockYUVOverlay(bmp);

        rect.x = 0;
        rect.y = 0;
        rect.w = video_width;
        rect.h = video_height;
        SDL_DisplayYUVOverlay(bmp, &rect);
        return false;
    }
    return true;
}

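/* Main playback entry point: open the file, set up audio output (and the SDL
   video window when video playback is enabled), then decode packets until
   stopped, queueing video packets so they stay interleaved with the audio. */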
bool FFaudio::play (const char * filename, VFSFile & file)
{
    AUDDBG ("Playing %s.\n", filename);

    AVPacket pkt = AVPacket();
    int errcount;
    /* bool codec_opened = false;   //JWT:DON'T THINK WE NEED ANYMORE. */
    int out_fmt;
    bool planar;
    bool error = false;
    SDL_Overlay *bmp = nullptr;  /* JWT: ALL SDL_* STUFF IS FOR PLAYING VIDEOS */
    SDL_Surface *screen = nullptr;
    SDL_Event event;
    int video_width = 0;
    int video_height = 0;
    play_video_wason = play_video;
    float video_aspect_ratio = 0;
    struct SwsContext *sws_ctx = NULL;

    Index<char> buf;

    AVFormatContext * ic = open_input_file (filename, file);
    const char * fileext = strrchr (filename, '.');
    if (ic)
    {
        if (fromstdin)  /* JWT: FOR STDIN: TRY TO GET "read_tuple()" STUFF NOW, SINCE FILE COULD NOT BE OPENED EARLIER IN read_tuple()! */
        {
            Tuple tuple;

            AUDDBG ("---- playing from STDIN: get TUPLE stuff now: IC is defined\n");
            if (find_codec (ic, & cinfo, & vcinfo))
            {
                tuple.set_filename (filename);

                tuple.set_int (Tuple::Length, ic->duration / 1000);
                tuple.set_int (Tuple::Bitrate, ic->bit_rate / 1000);
                if (cinfo.codec->long_name)
                    tuple.set_str (Tuple::Codec, cinfo.codec->long_name);
                if (ic->metadata)
                    read_metadata_dict (tuple, ic->metadata);
                if (cinfo.stream->metadata)
                    read_metadata_dict (tuple, cinfo.stream->metadata);
                if (play_video && vcinfo.stream->metadata)
                    read_metadata_dict (tuple, vcinfo.stream->metadata);
                set_playback_tuple (tuple.ref ());
            }
            else
            {
                AUDERR ("No codec found for %s.\n", filename);
                goto error_exit;
            }
        }
        else
        {
            if (! find_codec (ic, & cinfo, & vcinfo))
            {
                AUDERR ("No codec found for %s.\n", filename);
                goto error_exit;
            }
        }
    }
    else
        return false;

    AUDDBG("got codec %s for stream index %d, opening\n", cinfo.codec->name, cinfo.stream_idx);

    if (avcodec_open2 (cinfo.context, cinfo.codec, NULL) < 0)
        goto error_exit;

    /* JWT: IF USER ALSO WANTS TO PLAY VIDEO THEN WE SET UP POP-UP VIDEO SCREEN: */
    if (play_video)
    {
        String video_windowtitle;
        int video_sws_scale = 0;
        if (avcodec_open2 (vcinfo.context, vcinfo.codec, NULL) < 0)
            goto error_exit;

        // Allocate a place to put our YUV image on that screen (initialize to video's size,
        // then (later) resize to user's size prefs if specified (must do this way to get video to scale):
#ifndef __DARWIN__
        screen = SDL_SetVideoMode(vcinfo.context->width, vcinfo.context->height, 0, SDL_RESIZABLE);
#else
        screen = SDL_SetVideoMode(vcinfo.context->width, vcinfo.context->height, 24, SDL_RESIZABLE);
#endif

        video_windowtitle = aud_get_str ("ffaudio", "video_windowtitle");
        if (video_windowtitle)
            SDL_WM_SetCaption(video_windowtitle, NULL);
        if (aud_get_int ("ffaudio", "video_sws_scale"))  //USER CAN CHOOSE SWS_SCALE VALUE.
            video_sws_scale = aud_get_int ("ffaudio", "video_sws_scale");
        else
            video_sws_scale = SWS_BICUBIC;  //default=4

        bmp = SDL_CreateYUVOverlay(
            vcinfo.context->width,
            vcinfo.context->height,
            SDL_YV12_OVERLAY,
            screen
        );
        sws_ctx = sws_getContext (
            vcinfo.context->width,
            vcinfo.context->height,
            vcinfo.context->pix_fmt,
            vcinfo.context->width,
            vcinfo.context->height,
            PIX_FMT_YUV420P,
            video_sws_scale,
            NULL,
            NULL,
            NULL
        );

        /* NOW CALCULATE THE WIDTH, HEIGHT, AND ASPECT BASED ON VIDEO'S SIZE AND ANY USER PARAMETERS GIVEN: */
        video_aspect_ratio = vcinfo.context->height
            ? (float)vcinfo.context->width / (float)vcinfo.context->height : 1.0;
        int vx = aud_get_int ("ffaudio", "video_xsize");
        int vy = aud_get_int ("ffaudio", "video_ysize");
        /* JWT:FIXME:FLV AND SWF STREAMS ERROR OUT ON ATTEMPT TO CHANGE SIZE AT STARTUP?!
           THOUGH USER CAN STILL RESIZE WINDOW LATER!  ERROR MSG:
           "Assertion ref->size >= offset + s->mb_stride * ((f->height+15)/16) failed
           at libavcodec/mpegvideo.c:1963
        */
        if (! strcmp_nocase (fileext, ".flv") || ! strcmp_nocase (fileext, ".swf"))
            vx = vy = 0;
        if (vx && !vy)  //user specified width only, calc. height based on aspect:
        {
            vcinfo.context->width = video_width = vx;
            vcinfo.context->height = video_height = (int)((float)vx / video_aspect_ratio);
        }
        else if (!vx && vy)  //user specified height only, calc. width based on aspect:
        {
            vcinfo.context->height = video_height = vy;
            vcinfo.context->width = video_width = (int)((float)vy * video_aspect_ratio);
        }
        else if (vx && vy)  //user specified fixed width and height:
        {
            vcinfo.context->width = video_width = vx;
            vcinfo.context->height = video_height = vy;
        }
        else  //user specified nothing, use video's desired wXh:
        {
            video_width = vcinfo.context->width;
            video_height = vcinfo.context->height;
        }
        video_aspect_ratio = video_height
            ? (float)video_width / (float)video_height : 1.0;  //fall thru to square to avoid possibility of "/0"!

        //NOW "RESIZE" screen to user's wXh, if user set something:
        if (vx || vy)
        {
#ifndef __DARWIN__
            screen = SDL_SetVideoMode(video_width, video_height, 0, SDL_RESIZABLE);
#else
            screen = SDL_SetVideoMode(video_width, video_height, 24, SDL_RESIZABLE);
#endif

            if(!screen) {
                AUDERR("SDL: could not re-set video mode - exiting\n");
                goto error_exit;
            }
        }
        if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_TIMER)) {
            AUDERR("Could not initialize SDL - %s\n", SDL_GetError());
            goto error_exit;
        }
    }

    /* codec_opened = true;   //JWT:DON'T THINK WE NEED ANYMORE. */

    switch (cinfo.context->sample_fmt)
    {
        case AV_SAMPLE_FMT_U8: out_fmt = FMT_U8; planar = false; break;
        case AV_SAMPLE_FMT_S16: out_fmt = FMT_S16_NE; planar = false; break;
        case AV_SAMPLE_FMT_S32: out_fmt = FMT_S32_NE; planar = false; break;
        case AV_SAMPLE_FMT_FLT: out_fmt = FMT_FLOAT; planar = false; break;

        case AV_SAMPLE_FMT_U8P: out_fmt = FMT_U8; planar = true; break;
        case AV_SAMPLE_FMT_S16P: out_fmt = FMT_S16_NE; planar = true; break;
        case AV_SAMPLE_FMT_S32P: out_fmt = FMT_S32_NE; planar = true; break;
        case AV_SAMPLE_FMT_FLTP: out_fmt = FMT_FLOAT; planar = true; break;

        default:
            AUDERR ("Unsupported audio format %d\n", (int) cinfo.context->sample_fmt);
            goto error_exit;
    }

    /* Open audio output */
    AUDDBG("opening audio output - bitrate=%d=\n", ic->bit_rate);

    set_stream_bitrate(ic->bit_rate);

    open_audio(out_fmt, cinfo.context->sample_rate, cinfo.context->channels);

    errcount = 0;

    int seek_value, ret, frameFinished, decoded, len, size, video_qsize, acount, vcount;
    bool knit1perl2;
    acount = 0;  vcount = 0;  /* JWT:LET'S COUNT PACKETS OF EACH TYPE FOR DEBUGGING. */
    AVFrame * frame;
    AVFrame * vframe;
    /* JWT:video_qsize:  MAX # VIDEO PACKETS TO QUEUE UP FOR INTERLACING TO SMOOTH VIDEO
       PLAYBACK - GOOD RANGE IS 8-56, NOT ENOUGH=JITTERY VIDEO,
       TOO MANY=AUDIO/VIDEO BECOME NOTICEABLY OUT OF SYNC!
    */
    video_qsize = (aud_get_int ("ffaudio", "video_qsize"))
        ? aud_get_int ("ffaudio", "video_qsize") : 16;
    if (video_qsize < 1)
        video_qsize = 1;

    pktQueue *pktQ;
    pktQ = createQueue(video_qsize);
    /*
       JWT: THIS FLAG FORCES THE VIDEO QUEUE TO BE POPPED ONCE FOR EVERY *TWO* AUDIO
       PACKETS - MOST VIDEOS SEEM TO HAVE ABOUT A 2-1 RATIO OF AUDIO TO VIDEO PACKETS AND
       THIS PRETTY MUCH *ELIMINATED* THE VIDEO JITTER!!!
    */
    knit1perl2 = true;  /* ALTERNATES BETWEEN ON AND OFF EVERY OTHER AUDIO PACKET */

    AUDDBG("video queue size %d\n", video_qsize);
#if CHECK_LIBAVCODEC_VERSION (55, 45, 101, 55, 28, 1)
    frame = av_frame_alloc ();
    vframe = av_frame_alloc ();
#else
    frame = avcodec_alloc_frame ();
    vframe = avcodec_alloc_frame ();
#endif

    while (! check_stop ())
    {
        seek_value = check_seek ();

        if (seek_value >= 0)
        {
            /* JWT:FIRST, PROCESS VIDEO PACKETS SITTING IN THE QUEUE TO CLEAR THE QUEUE! */
            while (Dequeue (pktQ))
                continue;

            /* JWT: HAD TO CHANGE THIS FROM "AVSEEK_FLAG_ANY" TO AVSEEK_FLAG_BACKWARD
               TO GET SEEK TO NOT RANDOMLY BRICK?! */
            if (av_seek_frame (ic, -1, (int64_t) seek_value * AV_TIME_BASE /
             1000, AVSEEK_FLAG_BACKWARD) < 0)
            {
                AUDERR ("error while seeking\n");
            } else
                errcount = 0;

            seek_value = -1;
        }

        AVPacket tmp;

        /* Read next frame (or more) of data */
        if ((ret = av_read_frame(ic, &pkt)) < 0)
        {
            if (ret == (int) AVERROR_EOF)
            {
                AUDDBG("eof reached\n");
                break;
            }
            else
            {
                if (++errcount > 4)
                {
                    AUDERR ("av_read_frame error %d, giving up.\n", ret);
                    break;
                } else
                    continue;
            }
        } else
            errcount = 0;

        /* Ignore any other substreams */
        if (pkt.stream_index != cinfo.stream_idx)
        {
            if (!play_video || pkt.stream_index != vcinfo.stream_idx)
            {
                av_free_packet(&pkt);
                continue;
            }
        }

        /* Decode and play packet/frame */
        memcpy(&tmp, &pkt, sizeof(tmp));
        frameFinished = 0;
        while (tmp.size > 0 && ! check_stop ())
        {
            /* Check for seek request and bail out if we have one */
            if (seek_value < 0)
                seek_value = check_seek ();

            if (seek_value >= 0)
                break;

            decoded = 0;
            if (pkt.stream_index == cinfo.stream_idx)  /* PROCESS AUDIO PACKET: */
            {
                ++acount;
                /* JWT:IF PLAYING VIDEO, CHECK VIDEO QUEUE. */
                if (play_video)
                {
                    int i,pkts2pop;
                    /* FIRST PROCESS THE NEXT # VIDEO PACKETS, IF ANY IN THE QUEUE:
                       FOR EVERY *OTHER* AUDIO PACKET, GRAB 1 VIDEO PKT. OFF QUEUE,
                       UNLESS QUEUE IS FULL, THEN GRAB 2 SO WE HAVE ROOM FOR THE NEXT ONE!
                    */
                    pkts2pop = knit1perl2 ? ((pktQ->size == pktQ->capacity) ? 2 : 1) : 0;
                    if (pkts2pop > video_qsize)
                        pkts2pop = video_qsize;
                    knit1perl2 = ! knit1perl2;  //WE'RE ASSUMING LIKELY NEAR 2x AUDIO TO VIDEO FRAMES:
                    AVPacket * pktRef;
                    for (i=1; i<=pkts2pop; i++)
                    {
                        pktRef = QFront (pktQ);
                        if (pktRef)
                        {
                            while (1)
                            {
                                if (!write_videoframe (sws_ctx, vframe, bmp, pktRef, &frameFinished,
                                        video_width, video_height))
                                    break;
                            }
                            if (!Dequeue (pktQ))
                                AUDERR("Queue is Empty\n");
                        }
                    }
                }
                len = avcodec_decode_audio4 (cinfo.context, frame, & decoded, & tmp);
                if (len < 0)
                {
                    AUDERR ("decode_audio() failed, code %d\n", len);
                    break;
                }

                tmp.size -= len;
                tmp.data += len;

                if (! decoded)
                    /* continue;   // JWT:NOT SURE WHY THIS WAS A CONTINUE INSTEAD OF A BREAK?! */
                    break;

                size = FMT_SIZEOF (out_fmt) * cinfo.context->channels * frame->nb_samples;

                if (planar)
                {
                    if (size > buf.len ())
                        buf.resize (size);

                    audio_interlace ((const void * *) frame->data, out_fmt,
                     cinfo.context->channels, buf.begin (), frame->nb_samples);
                    write_audio (buf.begin (), size);
                }
                else
                    write_audio (frame->data[0], size);

            }
            else  /* JWT:PROCESS VIDEO PACKET HERE: */
            {
                ++vcount;
                /* JWT: IF QUEUE IS FULL, PROCESS NEXT 2 VIDEO PACKETS FROM QUEUE. */
                if (isQueueFull (pktQ))
                {
                    int i, pkts2pop;
                    AVPacket * pktRef;

                    pkts2pop = (video_qsize > 1) ? 2 : 1;
                    for (i=1; i<=pkts2pop; i++)
                    {
                        pktRef = QFront (pktQ);
                        if (pktRef)
                        {
                            while (1)
                            {
                                if (!write_videoframe (sws_ctx, vframe, bmp, pktRef, &frameFinished,
                                        video_width, video_height))
                                    break;
                            }
                            if (!Dequeue (pktQ))
                                AUDERR("Queue is Empty\n");
                        }
                    }
                    AUDDBG("Queue filled.\n");
                }
                if (Enqueue(pktQ, pkt)) {  /* VIDEO PACKETS TAKE A NUMBER & GET IN LINE! */
                    break;
                }
                AUDERR("Could not enqueue packet, refuses to wait in line, so process NOW!\n");
                if (!write_videoframe (sws_ctx, vframe, bmp, &pkt, &frameFinished,
                        video_width, video_height))
                    break;
            }

            /* JWT: NOW HANDLE VIDEO UI EVENTS SUCH AS RESIZE OR KILL SCREEN: */
            if (play_video)
            {
                SDL_Rect rect;

                SDL_PollEvent(&event);
                switch(event.type) {
                    case SDL_QUIT:
                        SDL_Quit();
                        play_video = false;
                        //goto error_exit;
                        break;

                    case SDL_VIDEORESIZE:
                        //Resize the screen
                        float new_aspect_ratio;  //FOR SOME REASON C++ REQUIRES 2 LINES HERE?!
                        new_aspect_ratio = event.resize.h
                            ? (float)event.resize.w / (float)event.resize.h : 1.0;
                        if (new_aspect_ratio > video_aspect_ratio)
                        {
                            video_height = event.resize.h;
                            video_width = (int)(video_aspect_ratio * (float)video_height);
                        }
                        else
                        {
                            video_width = event.resize.w;
                            video_height = (int)((float)video_width / video_aspect_ratio);
                        }

#ifndef __DARWIN__
                        screen = SDL_SetVideoMode( video_width, video_height, 0, SDL_RESIZABLE );
#else
                        screen = SDL_SetVideoMode( video_width, video_height, 24, SDL_RESIZABLE );
#endif

                        //If there's an error
                        if( screen == NULL )
                            break;

                    case SDL_VIDEOEXPOSE:
                        rect.x = 0;
                        rect.y = 0;
                        rect.w = video_width;
                        rect.h = video_height;
                        SDL_DisplayYUVOverlay(bmp, &rect);
                        break;

                    default:
                        break;
                }
            }
        }
    }

error_exit:
    AUDDBG("end of playback - %d audio frames, %d video frames processed.\n", acount, vcount);
    if (play_video)
        SDL_Quit();

#if CHECK_LIBAVCODEC_VERSION (55, 45, 101, 55, 28, 1)
    av_frame_free (& vframe);
    av_frame_free (& frame);
#elif CHECK_LIBAVCODEC_VERSION (54, 59, 100, 54, 28, 0)
    avcodec_free_frame (& vframe);
    avcodec_free_frame (& frame);
#else
    av_free (vframe);
    av_free (frame);
#endif

    if (pkt.data)
        av_free_packet(&pkt);
    if (ic != nullptr)
        close_input_file(ic, & cinfo, & vcinfo);

    return ! error;
}

const char FFaudio::about[] =
 N_("Multi-format audio decoding plugin for Audacious using\n"
    "FFmpeg multimedia framework (http://www.ffmpeg.org/)\n"
    "\n"
    "Audacious plugin by:\n"
    "William Pitcock <nenolod@nenolod.net>\n"
    "Matti Hämäläinen <ccr@tnsp.org>");

const char * const FFaudio::exts[] = {
    /* musepack, SV7/SV8 */
    "mpc", "mp+", "mpp",

    /* windows media audio */
    "wma",

    /* shorten */
    "shn",

    /* atrac3 */
    "aa3", "oma",

    /* MPEG 2/4 AC3 */
    "ac3",

    /* monkey's audio */
    "ape",

    /* DTS */
    "dts",

    /* VQF */
    "vqf",

    /* MPEG-4 */
    "m4a", "mp4",

    /* WAV (there are some WAV formats sndfile can't handle) */
    "wav",

    /* Handle OGG streams (FLAC/Vorbis etc.) */
    "ogg", "oga",

    /* Opus */
    "opus",

    /* Speex */
    "spx",

    /* True Audio */
    "tta",

    /* AVI // JWT:ADDED */
    "avi",

    /* FLV // JWT:ADDED */
    "flv",

    /* end of table */
    nullptr
};

const char * const FFaudio::mimes[] = {"application/ogg", nullptr};
