scrobblers.py 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855
  1. import logging
  2. import re
  3. from datetime import datetime
  4. from typing import Optional
  5. import pendulum
  6. import pytz
  7. from beers.models import Beer
  8. from boardgames.models import BoardGame
  9. from books.models import Book
  10. from dateutil.parser import parse
  11. from django.utils import timezone
  12. from locations.constants import LOCATION_PROVIDERS
  13. from locations.models import GeoLocation
  14. from music.constants import JELLYFIN_POST_KEYS, MOPIDY_POST_KEYS
  15. from music.models import Track
  16. from podcasts.models import PodcastEpisode
  17. from podcasts.utils import parse_mopidy_uri
  18. from puzzles.models import Puzzle
  19. from scrobbles.constants import (
  20. JELLYFIN_AUDIO_ITEM_TYPES,
  21. MANUAL_SCROBBLE_FNS,
  22. SCROBBLE_CONTENT_URLS,
  23. )
  24. from scrobbles.models import Scrobble
  25. from scrobbles.utils import convert_to_seconds
  26. from sports.models import SportEvent
  27. from sports.thesportsdb import lookup_event_from_thesportsdb
  28. from tasks.models import Task
  29. from videogames.howlongtobeat import lookup_game_from_hltb
  30. from videogames.models import VideoGame
  31. from videos.models import Video
  32. from webpages.models import WebPage
  33. from vrobbler.apps.tasks.constants import (
  34. TODOIST_TITLE_PREFIX_LABELS,
  35. TODOIST_TITLE_SUFFIX_LABELS,
  36. )
  37. logger = logging.getLogger(__name__)
  38. def mopidy_scrobble_media(post_data: dict, user_id: int) -> Scrobble:
  39. media_type = Scrobble.MediaType.TRACK
  40. if "podcast" in post_data.get("mopidy_uri", ""):
  41. media_type = Scrobble.MediaType.PODCAST_EPISODE
  42. logger.info(
  43. "[mopidy_webhook] called",
  44. extra={
  45. "user_id": user_id,
  46. "post_data": post_data,
  47. "media_type": media_type,
  48. },
  49. )
  50. if media_type == Scrobble.MediaType.PODCAST_EPISODE:
  51. parsed_data = parse_mopidy_uri(post_data.get("mopidy_uri", ""))
  52. podcast_name = post_data.get(
  53. "album", parsed_data.get("podcast_name", "")
  54. )
  55. media_obj = PodcastEpisode.find_or_create(
  56. title=parsed_data.get("episode_filename", ""),
  57. podcast_name=podcast_name,
  58. producer_name=post_data.get("artist", ""),
  59. number=parsed_data.get("episode_num", ""),
  60. pub_date=parsed_data.get("pub_date", ""),
  61. mopidy_uri=post_data.get("mopidy_uri", ""),
  62. )
  63. else:
  64. media_obj = Track.find_or_create(
  65. title=post_data.get("name", ""),
  66. artist_name=post_data.get("artist", ""),
  67. album_name=post_data.get("album", ""),
  68. run_time_seconds=post_data.get("run_time", 900000),
  69. )
  70. log = {}
  71. try:
  72. log = {"mopidy_source": post_data.get("mopidy_uri", "").split(":")[0]}
  73. except IndexError:
  74. pass
  75. return media_obj.scrobble_for_user(
  76. user_id,
  77. source="Mopidy",
  78. playback_position_seconds=int(
  79. post_data.get(MOPIDY_POST_KEYS.get("PLAYBACK_POSITION_TICKS"), 1)
  80. / 1000
  81. ),
  82. status=post_data.get(MOPIDY_POST_KEYS.get("STATUS"), ""),
  83. log=log,
  84. )
def jellyfin_scrobble_media(
    post_data: dict, user_id: int
) -> Optional[Scrobble]:
    """Scrobble a video or music track from a Jellyfin webhook payload.

    Returns the resulting Scrobble, or None when the event should be
    ignored (an empty progress tick) or no matching media is found.
    """
    media_type = Scrobble.MediaType.VIDEO
    # NOTE: pop() mutates the caller's post_data — ItemType is consumed here.
    if post_data.pop("ItemType", "") in JELLYFIN_AUDIO_ITEM_TYPES:
        media_type = Scrobble.MediaType.TRACK

    # A PlaybackProgress event with a zeroed position carries no useful data.
    null_position_on_progress = (
        post_data.get("PlaybackPosition") == "00:00:00"
        and post_data.get("NotificationType") == "PlaybackProgress"
    )
    # Jellyfin has some race conditions with its webhooks, these hacks fix some of them
    if null_position_on_progress:
        logger.info(
            "[jellyfin_scrobble_media] no playback position tick, aborting",
            extra={"post_data": post_data},
        )
        return

    # NOTE(review): timestamp is parsed (and will raise on a missing or
    # malformed value) but is never used below — confirm whether it was
    # meant to be passed to scrobble_for_user.
    timestamp = parse(
        post_data.get(JELLYFIN_POST_KEYS.get("TIMESTAMP"), "")
    ).replace(tzinfo=pytz.utc)
    # Divisor suggests Jellyfin reports 100ns ticks -> seconds; TODO confirm.
    playback_position_seconds = int(
        post_data.get(JELLYFIN_POST_KEYS.get("PLAYBACK_POSITION_TICKS"), 1)
        / 10000000
    )
    if media_type == Scrobble.MediaType.VIDEO:
        # Strip the IMDB "tt" prefix before lookup.
        media_obj = Video.get_from_imdb_id(
            post_data.get("Provider_imdb", "").replace("tt", "")
        )
    else:
        media_obj = Track.find_or_create(
            title=post_data.get("Name", ""),
            artist_name=post_data.get("Artist", ""),
            album_name=post_data.get("Album", ""),
            run_time_seconds=convert_to_seconds(
                post_data.get("RunTime", 900000)
            ),
            musicbrainz_id=post_data.get("Provider_musicbrainztrack", ""),
        )
        # A hack because we don't worry about updating music ... we either finish it or we don't
        playback_position_seconds = 0

    if not media_obj:
        logger.info(
            "[jellyfin_scrobble_media] no video found from POST data",
            extra={"post_data": post_data},
        )
        return

    # Derive playback status from pause flag / notification type.
    playback_status = "resumed"
    if post_data.get("IsPaused"):
        playback_status = "paused"
    elif post_data.get("NotificationType") == "PlaybackStop":
        playback_status = "stopped"

    return media_obj.scrobble_for_user(
        user_id,
        source=post_data.get(JELLYFIN_POST_KEYS.get("SOURCE")),
        playback_position_seconds=playback_position_seconds,
        status=playback_status,
    )
  142. def web_scrobbler_scrobble_media(
  143. youtube_id: str, user_id: int, status: str = "started"
  144. ) -> Optional[Scrobble]:
  145. video = Video.get_from_youtube_id(youtube_id)
  146. return video.scrobble_for_user(user_id, status, source="Web Scrobbler")
  147. def manual_scrobble_video(
  148. video_id: str, user_id: int, action: Optional[str] = None
  149. ):
  150. if "tt" in video_id:
  151. video = Video.get_from_imdb_id(video_id)
  152. else:
  153. video = Video.get_from_youtube_id(video_id)
  154. # When manually scrobbling, try finding a source from the series
  155. source = "Vrobbler"
  156. if video.tv_series:
  157. source = video.tv_series.preferred_source
  158. scrobble_dict = {
  159. "user_id": user_id,
  160. "timestamp": timezone.now(),
  161. "playback_position_seconds": 0,
  162. "source": source,
  163. }
  164. logger.info(
  165. "[scrobblers] manual video scrobble request received",
  166. extra={
  167. "video_id": video.id,
  168. "user_id": user_id,
  169. "scrobble_dict": scrobble_dict,
  170. "media_type": Scrobble.MediaType.VIDEO,
  171. },
  172. )
  173. scrobble = Scrobble.create_or_update(video, user_id, scrobble_dict)
  174. if action == "stop":
  175. scrobble.stop(force_finish=True)
  176. return scrobble
  177. def manual_scrobble_event(
  178. thesportsdb_id: str, user_id: int, action: Optional[str] = None
  179. ):
  180. data_dict = lookup_event_from_thesportsdb(thesportsdb_id)
  181. event = SportEvent.find_or_create(data_dict)
  182. scrobble_dict = {
  183. "user_id": user_id,
  184. "timestamp": timezone.now(),
  185. "playback_position_seconds": 0,
  186. "source": "TheSportsDB",
  187. }
  188. return Scrobble.create_or_update(event, user_id, scrobble_dict)
  189. def manual_scrobble_video_game(
  190. hltb_id: str, user_id: int, action: Optional[str] = None
  191. ):
  192. game = VideoGame.objects.filter(hltb_id=hltb_id).first()
  193. if not game:
  194. data_dict = lookup_game_from_hltb(hltb_id)
  195. if not data_dict:
  196. logger.info(
  197. "[manual_scrobble_video_game] game not found on hltb",
  198. extra={
  199. "hltb_id": hltb_id,
  200. "user_id": user_id,
  201. "media_type": Scrobble.MediaType.VIDEO_GAME,
  202. },
  203. )
  204. return
  205. game = VideoGame.find_or_create(data_dict)
  206. scrobble_dict = {
  207. "user_id": user_id,
  208. "timestamp": timezone.now(),
  209. "playback_position_seconds": 0,
  210. "source": "Vrobbler",
  211. "long_play_complete": False,
  212. }
  213. logger.info(
  214. "[scrobblers] manual video game scrobble request received",
  215. extra={
  216. "videogame_id": game.id,
  217. "user_id": user_id,
  218. "scrobble_dict": scrobble_dict,
  219. "media_type": Scrobble.MediaType.VIDEO_GAME,
  220. },
  221. )
  222. return Scrobble.create_or_update(game, user_id, scrobble_dict)
  223. def manual_scrobble_book(
  224. title: str, user_id: int, action: Optional[str] = None
  225. ):
  226. book = Book.get_from_google(title)
  227. scrobble_dict = {
  228. "user_id": user_id,
  229. "timestamp": timezone.now(),
  230. "playback_position_seconds": 0,
  231. "source": "Vrobbler",
  232. "long_play_complete": False,
  233. }
  234. logger.info(
  235. "[scrobblers] manual book scrobble request received",
  236. extra={
  237. "book_id": book.id,
  238. "user_id": user_id,
  239. "scrobble_dict": scrobble_dict,
  240. "media_type": Scrobble.MediaType.BOOK,
  241. },
  242. )
  243. return Scrobble.create_or_update(book, user_id, scrobble_dict)
  244. def manual_scrobble_board_game(
  245. bggeek_id: str, user_id: int, action: Optional[str] = None
  246. ):
  247. boardgame = BoardGame.find_or_create(bggeek_id)
  248. if not boardgame:
  249. logger.error(f"No board game found for ID {bggeek_id}")
  250. return
  251. scrobble_dict = {
  252. "user_id": user_id,
  253. "timestamp": timezone.now(),
  254. "playback_position_seconds": 0,
  255. "source": "Vrobbler",
  256. }
  257. logger.info(
  258. "[vrobbler-scrobble] board game scrobble request received",
  259. extra={
  260. "boardgame_id": boardgame.id,
  261. "user_id": user_id,
  262. "scrobble_dict": scrobble_dict,
  263. "media_type": Scrobble.MediaType.BOARD_GAME,
  264. },
  265. )
  266. return Scrobble.create_or_update(boardgame, user_id, scrobble_dict)
  267. def manual_scrobble_from_url(
  268. url: str, user_id: int, action: Optional[str] = None
  269. ) -> Scrobble:
  270. """We have scrobblable media URLs, and then any other webpages that
  271. we want to scrobble as a media type in and of itself. This checks whether
  272. we know about the content type, and routes it to the appropriate media
  273. scrobbler. Otherwise, return nothing."""
  274. content_key = ""
  275. try:
  276. domain = url.split("//")[-1].split("/")[0]
  277. except IndexError:
  278. domain = None
  279. for key, content_url in SCROBBLE_CONTENT_URLS.items():
  280. if domain in content_url:
  281. content_key = key
  282. item_id = None
  283. if not content_key:
  284. content_key = "-w"
  285. item_id = url
  286. # Try generic search for any URL with digit-based IDs
  287. if not item_id:
  288. try:
  289. item_id = re.findall(r"\d+", url)[0]
  290. except IndexError:
  291. pass
  292. if content_key == "-i":
  293. item_id = url.split("v=")[1].split("&")[0]
  294. scrobble_fn = MANUAL_SCROBBLE_FNS[content_key]
  295. return eval(scrobble_fn)(item_id, user_id, action=action)
  296. def todoist_scrobble_task_finish(
  297. todoist_task: dict, user_id: int, timestamp: datetime
  298. ) -> Optional[Scrobble]:
  299. scrobble = Scrobble.objects.filter(
  300. user_id=user_id,
  301. log__todoist_id=todoist_task.get("todoist_id"),
  302. in_progress=True,
  303. played_to_completion=False,
  304. ).first()
  305. if not scrobble:
  306. logger.info(
  307. "[todoist_scrobble_task_finish] todoist webhook finish called on missing task"
  308. )
  309. return
  310. scrobble.stop(timestamp=timestamp, force_finish=True)
  311. return scrobble
def todoist_scrobble_update_task(
    todoist_note: dict, user_id: int
) -> Optional[Scrobble]:
    """Attach a Todoist note/comment to the user's in-progress task scrobble.

    Returns the updated scrobble, or None when no matching in-progress
    scrobble exists.
    """
    scrobble = Scrobble.objects.filter(
        in_progress=True,
        user_id=user_id,
        log__todoist_id=todoist_note.get("task_id"),
    ).first()
    if not scrobble:
        logger.info(
            "[todoist_scrobble_update_task] no task found",
            extra={
                "todoist_note": todoist_note,
                "user_id": user_id,
                "media_type": Scrobble.MediaType.TASK,
            },
        )
        return

    # NOTE(review): the lookup above matches on "task_id" while the note is
    # stored under "todoist_id" — confirm both keys exist in the webhook
    # payload and refer to the intended ids.
    existing_notes = scrobble.log.get("notes", {})
    existing_notes[todoist_note.get("todoist_id")] = todoist_note.get("notes")
    scrobble.log["notes"] = existing_notes
    scrobble.save(update_fields=["log"])
    logger.info(
        "[todoist_scrobble_update_task] todoist note added",
        extra={
            "todoist_note": todoist_note,
            "user_id": user_id,
            "media_type": Scrobble.MediaType.TASK,
        },
    )
    return scrobble
  343. def todoist_scrobble_task(
  344. todoist_task: dict,
  345. user_id: int,
  346. started: bool = False,
  347. stopped: bool = False,
  348. ) -> Scrobble:
  349. prefix = ""
  350. suffix = ""
  351. # TODO look up the user profile and instead of checking PREFIX/SUFFIX, check against
  352. # user.profile.task_context_tags which will result in context-based tag titles
  353. # We'd also have to migrate existing tasks to the new context based ones (maybe)
  354. for label in todoist_task["todoist_label_list"]:
  355. if label in TODOIST_TITLE_PREFIX_LABELS:
  356. prefix = label
  357. if label in TODOIST_TITLE_SUFFIX_LABELS:
  358. suffix = label
  359. if not prefix and suffix:
  360. logger.warning(
  361. "Missing a prefix and suffix tag for task",
  362. extra={"todoist_scrobble_task": todoist_task},
  363. )
  364. title = " ".join([prefix.capitalize(), suffix.capitalize()])
  365. task = Task.find_or_create(title)
  366. timestamp = pendulum.parse(todoist_task.get("updated_at", timezone.now()))
  367. in_progress_scrobble = Scrobble.objects.filter(
  368. user_id=user_id,
  369. in_progress=True,
  370. log__todoist_id=todoist_task.get("todoist_id"),
  371. task=task,
  372. ).last()
  373. if not in_progress_scrobble and stopped:
  374. logger.info(
  375. "[todoist_scrobble_task] cannot stop already stopped task",
  376. extra={
  377. "todoist_type": todoist_task["todoist_type"],
  378. "todoist_event": todoist_task["todoist_event"],
  379. "todoist_id": todoist_task["todoist_id"],
  380. },
  381. )
  382. return
  383. if in_progress_scrobble and started:
  384. logger.info(
  385. "[todoist_scrobble_task] cannot start already started task",
  386. extra={
  387. "todoist_type": todoist_task["todoist_type"],
  388. "todoist_event": todoist_task["todoist_event"],
  389. "todoist_id": todoist_task["todoist_id"],
  390. },
  391. )
  392. return in_progress_scrobble
  393. # Finish an in-progress scrobble
  394. if in_progress_scrobble and stopped:
  395. logger.info(
  396. "[todoist_scrobble_task] finishing",
  397. extra={
  398. "todoist_type": todoist_task["todoist_type"],
  399. "todoist_event": todoist_task["todoist_event"],
  400. "todoist_id": todoist_task["todoist_id"],
  401. },
  402. )
  403. return todoist_scrobble_task_finish(todoist_task, user_id, timestamp)
  404. # Default to create new scrobble "if not in_progress_scrobble and in_progress_in_todoist"
  405. # TODO Should use updated_at from TOdoist, but parsing isn't working
  406. scrobble_dict = {
  407. "user_id": user_id,
  408. "timestamp": timestamp,
  409. "playback_position_seconds": 0,
  410. "source": "Todoist",
  411. "log": todoist_task,
  412. }
  413. logger.info(
  414. "[todoist_scrobble_task] creating",
  415. extra={
  416. "task_id": task.id,
  417. "user_id": user_id,
  418. "scrobble_dict": scrobble_dict,
  419. "media_type": Scrobble.MediaType.TASK,
  420. },
  421. )
  422. scrobble = Scrobble.create_or_update(task, user_id, scrobble_dict)
  423. return scrobble
  424. def emacs_scrobble_update_task(
  425. emacs_id: str, emacs_notes: dict, user_id: int
  426. ) -> Optional[Scrobble]:
  427. scrobble = Scrobble.objects.filter(
  428. in_progress=True,
  429. user_id=user_id,
  430. log__source_id=emacs_id,
  431. log__source="emacs",
  432. ).first()
  433. if not scrobble:
  434. logger.info(
  435. "[emacs_scrobble_update_task] no task found",
  436. extra={
  437. "emacs_notes": emacs_notes,
  438. "user_id": user_id,
  439. "media_type": Scrobble.MediaType.TASK,
  440. },
  441. )
  442. return
  443. notes_updated = False
  444. for note in emacs_notes:
  445. existing_note_ts = [
  446. n.get("timestamp") for n in scrobble.log.get("notes", [])
  447. ]
  448. if not scrobble.log.get('notes"'):
  449. scrobble.log["notes"] = []
  450. if note.get("timestamp") not in existing_note_ts:
  451. scrobble.log["notes"].append(
  452. {note.get("timestamp"): note.get("content")}
  453. )
  454. notes_updated = True
  455. if notes_updated:
  456. scrobble.save(update_fields=["log"])
  457. logger.info(
  458. "[emacs_scrobble_update_task] emacs note added",
  459. extra={
  460. "emacs_note": emacs_notes,
  461. "user_id": user_id,
  462. "media_type": Scrobble.MediaType.TASK,
  463. },
  464. )
  465. return scrobble
def emacs_scrobble_task(
    task_data: dict,
    user_id: int,
    started: bool = False,
    stopped: bool = False,
) -> Scrobble | None:
    """Start, stop, or resume a task scrobble reported from emacs/org-mode.

    Mirrors todoist_scrobble_task, but matches existing scrobbles on
    log__source_id plus log__source="emacs" instead of a todoist id.
    """
    prefix = ""
    suffix = ""
    source_id = task_data.get("source_id")
    # Build the task title from the first matching prefix/suffix labels.
    # NOTE(review): raises TypeError when "labels" is absent (get() -> None),
    # and reuses the TODOIST_* label constants for emacs — confirm intended.
    for label in task_data.get("labels"):
        if label in TODOIST_TITLE_PREFIX_LABELS:
            prefix = label
        if label in TODOIST_TITLE_SUFFIX_LABELS:
            suffix = label
    if not prefix and not suffix:
        logger.warning(
            "Missing a prefix and suffix tag for task",
            extra={"emacs_scrobble_task": task_data},
        )
    title = " ".join([prefix.capitalize(), suffix.capitalize()])
    task = Task.find_or_create(title)
    # NOTE(review): pendulum.parse expects a string; the timezone.now()
    # datetime fallback would raise — confirm "updated_at" is always present.
    timestamp = pendulum.parse(task_data.get("updated_at", timezone.now()))
    in_progress_scrobble = Scrobble.objects.filter(
        user_id=user_id,
        in_progress=True,
        log__source_id=source_id,
        log__source="emacs",
        task=task,
    ).last()
    # Nothing to stop.
    if not in_progress_scrobble and stopped:
        logger.info(
            "[emacs_scrobble_task] cannot stop already stopped task",
            extra={
                "emacs_id": source_id,
            },
        )
        return
    # Already running; hand back the existing scrobble.
    if in_progress_scrobble and started:
        logger.info(
            "[emacs_scrobble_task] cannot start already started task",
            extra={
                "emacs_id": source_id,
            },
        )
        return in_progress_scrobble
    # Finish an in-progress scrobble
    if in_progress_scrobble and stopped:
        logger.info(
            "[emacs_scrobble_task] finishing",
            extra={
                "emacs_id": source_id,
            },
        )
        in_progress_scrobble.stop(timestamp=timestamp, force_finish=True)
        return in_progress_scrobble
    # In progress but neither started nor stopped: no-op update.
    if in_progress_scrobble:
        return in_progress_scrobble
    # Convert incoming notes into the {timestamp: content} log format.
    # NOTE(review): pop("notes") raises KeyError when the key is absent —
    # confirm callers always include it (even as an empty list).
    notes = task_data.pop("notes")
    if notes:
        task_data["notes"] = []
        for note in notes:
            task_data["notes"].append(
                {note.get("timestamp"): note.get("content")}
            )
    scrobble_dict = {
        "user_id": user_id,
        "timestamp": timestamp,
        "playback_position_seconds": 0,
        "source": "Org-mode",
        "log": task_data,
    }
    logger.info(
        "[emacs_scrobble_task] creating",
        extra={
            "task_id": task.id,
            "user_id": user_id,
            "scrobble_dict": scrobble_dict,
            "media_type": Scrobble.MediaType.TASK,
        },
    )
    scrobble = Scrobble.create_or_update(task, user_id, scrobble_dict)
    return scrobble
  548. def manual_scrobble_task(url: str, user_id: int, action: Optional[str] = None):
  549. source_id = re.findall(r"\d+", url)[0]
  550. if "todoist" in url:
  551. source = "Todoist"
  552. title = "Generic Todoist task"
  553. description = " ".join(url.split("/")[-1].split("-")[:-1]).capitalize()
  554. task = Task.find_or_create(title)
  555. scrobble_dict = {
  556. "user_id": user_id,
  557. "timestamp": timezone.now(),
  558. "playback_position_seconds": 0,
  559. "source": source,
  560. "log": {"description": description, "source_id": source_id},
  561. }
  562. logger.info(
  563. "[vrobbler-scrobble] webpage scrobble request received",
  564. extra={
  565. "task_id": task.id,
  566. "user_id": user_id,
  567. "scrobble_dict": scrobble_dict,
  568. "media_type": Scrobble.MediaType.WEBPAGE,
  569. },
  570. )
  571. scrobble = Scrobble.create_or_update(task, user_id, scrobble_dict)
  572. return scrobble
  573. def manual_scrobble_webpage(
  574. url: str, user_id: int, action: Optional[str] = None
  575. ):
  576. webpage = WebPage.find_or_create({"url": url})
  577. scrobble_dict = {
  578. "user_id": user_id,
  579. "timestamp": timezone.now(),
  580. "playback_position_seconds": 0,
  581. "source": "Vrobbler",
  582. }
  583. logger.info(
  584. "[vrobbler-scrobble] webpage scrobble request received",
  585. extra={
  586. "webpage_id": webpage.id,
  587. "user_id": user_id,
  588. "scrobble_dict": scrobble_dict,
  589. "media_type": Scrobble.MediaType.WEBPAGE,
  590. },
  591. )
  592. scrobble = Scrobble.create_or_update(webpage, user_id, scrobble_dict)
  593. # possibly async this?
  594. scrobble.push_to_archivebox()
  595. return scrobble
def gpslogger_scrobble_location(data_dict: dict, user_id: int) -> Scrobble:
    """Record a GPSLogger position update as (part of) a location scrobble."""
    location = GeoLocation.find_or_create(data_dict)
    # NOTE(review): pendulum.parse expects a string; the timezone.now()
    # datetime fallback would raise — confirm "time" is always present.
    timestamp = pendulum.parse(data_dict.get("time", timezone.now()))
    extra_data = {
        "user_id": user_id,
        "timestamp": timestamp,
        "source": "GPSLogger",
        "media_type": Scrobble.MediaType.GEO_LOCATION,
    }
    scrobble = Scrobble.create_or_update_location(
        location,
        extra_data,
        user_id,
    )
    # NOTE(review): raises KeyError for a missing/unknown "prov" value —
    # presumably GPSLogger always sends a known provider; confirm.
    provider = LOCATION_PROVIDERS[data_dict.get("prov")]

    # Keep a per-scrobble trail of raw GPS updates in the log blob.
    if "gps_updates" not in scrobble.log.keys():
        scrobble.log["gps_updates"] = []
    scrobble.log["gps_updates"].append(
        {
            "timestamp": data_dict.get("time"),
            "position_provider": provider,
        }
    )
    # Use elapsed time since the scrobble started as its "position".
    if scrobble.timestamp:
        scrobble.playback_position_seconds = (
            timezone.now() - scrobble.timestamp
        ).seconds
    scrobble.save(update_fields=["log", "playback_position_seconds"])
    logger.info(
        "[gpslogger_webhook] gpslogger scrobble request received",
        extra={
            "scrobble_id": scrobble.id,
            "provider": provider,
            "user_id": user_id,
            "timestamp": extra_data.get("timestamp"),
            "raw_timestamp": data_dict.get("time"),
            "media_type": Scrobble.MediaType.GEO_LOCATION,
        },
    )
    return scrobble
def web_scrobbler_scrobble_video_or_song(
    data_dict: dict, user_id: Optional[int]
) -> Scrobble:
    """Scrobble a Web Scrobbler payload as an existing track, else a video.

    NOTE(review): this function appears unfinished — `episode` is never
    defined (the logger call below raises NameError at runtime), and the
    Video.get_from_youtube_id() call is missing its argument and discards
    its result. Flagging rather than rewriting, since the intended video
    path cannot be reconstructed from this file alone.
    """
    # We're not going to create music tracks, because the only time
    # we'd hit this is if we're listening to a concert or something.
    artist_name = data_dict.get("artist")
    track_name = data_dict.get("track")
    tracks = Track.objects.filter(
        artist__name=data_dict.get("artist"), title=data_dict.get("track")
    )
    if tracks.count() > 1:
        logger.warning(
            "Multiple tracks found for Web Scrobbler",
            extra={"artist": artist_name, "track": track_name},
        )
    track = tracks.first()
    # No track found, create a Video
    if not track:
        # NOTE(review): missing the youtube id argument; result is unused.
        Video.get_from_youtube_id()
    # Now we run off a scrobble
    mopidy_data = {
        "user_id": user_id,
        "timestamp": timezone.now(),
        "playback_position_seconds": data_dict.get("playback_time_ticks"),
        "source": "Mopidy",
        "mopidy_status": data_dict.get("status"),
    }
    logger.info(
        "[scrobblers] webhook mopidy scrobble request received",
        extra={
            # NOTE(review): `episode` is undefined in this scope — NameError.
            "episode_id": episode.id if episode else None,
            "user_id": user_id,
            "scrobble_dict": mopidy_data,
            "media_type": Scrobble.MediaType.PODCAST_EPISODE,
        },
    )
    scrobble = None
    if episode:
        scrobble = Scrobble.create_or_update(episode, user_id, mopidy_data)
    return scrobble
  676. def manual_scrobble_beer(
  677. untappd_id: str, user_id: int, action: Optional[str] = None
  678. ):
  679. beer = Beer.find_or_create(untappd_id)
  680. if not beer:
  681. logger.error(f"No beer found for Untappd ID {untappd_id}")
  682. return
  683. scrobble_dict = {
  684. "user_id": user_id,
  685. "timestamp": timezone.now(),
  686. "playback_position_seconds": 0,
  687. "source": "Vrobbler",
  688. }
  689. logger.info(
  690. "[vrobbler-scrobble] beer scrobble request received",
  691. extra={
  692. "beer_id": beer.id,
  693. "user_id": user_id,
  694. "scrobble_dict": scrobble_dict,
  695. "media_type": Scrobble.MediaType.BEER,
  696. },
  697. )
  698. # TODO Kick out a process to enrich the media here, and in every scrobble event
  699. return Scrobble.create_or_update(beer, user_id, scrobble_dict)
  700. def manual_scrobble_puzzle(
  701. ipdb_id: str, user_id: int, action: Optional[str] = None
  702. ):
  703. puzzle = Puzzle.find_or_create(ipdb_id)
  704. if not puzzle:
  705. logger.error(f"No puzzle found for IPDB ID {ipdb_id}")
  706. return
  707. scrobble_dict = {
  708. "user_id": user_id,
  709. "timestamp": timezone.now(),
  710. "playback_position_seconds": 0,
  711. "source": "Vrobbler",
  712. }
  713. logger.info(
  714. "[vrobbler-scrobble] puzzle scrobble request received",
  715. extra={
  716. "puzzle_id": puzzle.id,
  717. "user_id": user_id,
  718. "scrobble_dict": scrobble_dict,
  719. "media_type": Scrobble.MediaType.PUZZLE,
  720. },
  721. )
  722. # TODO Kick out a process to enrich the media here, and in every scrobble event
  723. return Scrobble.create_or_update(puzzle, user_id, scrobble_dict)