scrobblers.py 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000
  1. import logging
  2. import re
  3. from datetime import datetime, timedelta
  4. from typing import Any, Optional
  5. import pendulum
  6. import pytz
  7. from beers.models import Beer
  8. from boardgames.models import BoardGame, BoardGameDesigner, BoardGameLocation
  9. from books.models import Book
  10. from bricksets.models import BrickSet
  11. from dateutil.parser import parse
  12. from django.utils import timezone
  13. from locations.constants import LOCATION_PROVIDERS
  14. from locations.models import GeoLocation
  15. from music.constants import JELLYFIN_POST_KEYS, MOPIDY_POST_KEYS
  16. from music.models import Track
  17. from people.models import Person
  18. from podcasts.models import PodcastEpisode
  19. from podcasts.utils import parse_mopidy_uri
  20. from puzzles.models import Puzzle
  21. from scrobbles.constants import (
  22. JELLYFIN_AUDIO_ITEM_TYPES,
  23. MANUAL_SCROBBLE_FNS,
  24. SCROBBLE_CONTENT_URLS,
  25. )
  26. from scrobbles.models import Scrobble
  27. from scrobbles.utils import convert_to_seconds, extract_domain
  28. from sports.models import SportEvent
  29. from sports.thesportsdb import lookup_event_from_thesportsdb
  30. from tasks.models import Task
  31. from tasks.utils import get_title_from_labels
  32. from videogames.howlongtobeat import lookup_game_from_hltb
  33. from videogames.models import VideoGame
  34. from videos.models import Video
  35. from webpages.models import WebPage
  36. logger = logging.getLogger(__name__)
  37. def mopidy_scrobble_media(post_data: dict, user_id: int) -> Scrobble:
  38. media_type = Scrobble.MediaType.TRACK
  39. if "podcast" in post_data.get("mopidy_uri", ""):
  40. media_type = Scrobble.MediaType.PODCAST_EPISODE
  41. logger.info(
  42. "[mopidy_webhook] called",
  43. extra={
  44. "user_id": user_id,
  45. "post_data": post_data,
  46. "media_type": media_type,
  47. },
  48. )
  49. if media_type == Scrobble.MediaType.PODCAST_EPISODE:
  50. parsed_data = parse_mopidy_uri(post_data.get("mopidy_uri", ""))
  51. podcast_name = post_data.get(
  52. "album", parsed_data.get("podcast_name", "")
  53. )
  54. media_obj = PodcastEpisode.find_or_create(
  55. title=parsed_data.get("episode_filename", ""),
  56. podcast_name=podcast_name,
  57. producer_name=post_data.get("artist", ""),
  58. number=parsed_data.get("episode_num", ""),
  59. pub_date=parsed_data.get("pub_date", ""),
  60. mopidy_uri=post_data.get("mopidy_uri", ""),
  61. )
  62. else:
  63. media_obj = Track.find_or_create(
  64. title=post_data.get("name", ""),
  65. artist_name=post_data.get("artist", ""),
  66. album_name=post_data.get("album", ""),
  67. run_time_seconds=post_data.get("run_time", 900000),
  68. )
  69. log = {}
  70. try:
  71. log = {"mopidy_source": post_data.get("mopidy_uri", "").split(":")[0]}
  72. except IndexError:
  73. pass
  74. return media_obj.scrobble_for_user(
  75. user_id,
  76. source="Mopidy",
  77. playback_position_seconds=int(
  78. post_data.get(MOPIDY_POST_KEYS.get("PLAYBACK_POSITION_TICKS"), 1)
  79. / 1000
  80. ),
  81. status=post_data.get(MOPIDY_POST_KEYS.get("STATUS"), ""),
  82. log=log,
  83. )
  84. def jellyfin_scrobble_media(
  85. post_data: dict, user_id: int
  86. ) -> Optional[Scrobble]:
  87. media_type = Scrobble.MediaType.VIDEO
  88. if post_data.pop("ItemType", "") in JELLYFIN_AUDIO_ITEM_TYPES:
  89. media_type = Scrobble.MediaType.TRACK
  90. null_position_on_progress = (
  91. post_data.get("PlaybackPosition") == "00:00:00"
  92. and post_data.get("NotificationType") == "PlaybackProgress"
  93. )
  94. # Jellyfin has some race conditions with it's webhooks, these hacks fix some of them
  95. if null_position_on_progress:
  96. logger.info(
  97. "[jellyfin_scrobble_media] no playback position tick, aborting",
  98. extra={"post_data": post_data},
  99. )
  100. return
  101. timestamp = parse(
  102. post_data.get(JELLYFIN_POST_KEYS.get("TIMESTAMP"), "")
  103. ).replace(tzinfo=pytz.utc)
  104. playback_position_seconds = int(
  105. post_data.get(JELLYFIN_POST_KEYS.get("PLAYBACK_POSITION_TICKS"), 1)
  106. / 10000000
  107. )
  108. if media_type == Scrobble.MediaType.VIDEO:
  109. media_obj = Video.get_from_imdb_id(
  110. post_data.get("Provider_imdb", "").replace("tt", "")
  111. )
  112. else:
  113. media_obj = Track.find_or_create(
  114. title=post_data.get("Name", ""),
  115. artist_name=post_data.get("Artist", ""),
  116. album_name=post_data.get("Album", ""),
  117. run_time_seconds=convert_to_seconds(
  118. post_data.get("RunTime", 900000)
  119. ),
  120. musicbrainz_id=post_data.get("Provider_musicbrainztrack", ""),
  121. )
  122. # A hack because we don't worry about updating music ... we either finish it or we don't
  123. playback_position_seconds = 0
  124. if not media_obj:
  125. logger.info(
  126. "[jellyfin_scrobble_media] no video found from POST data",
  127. extra={"post_data": post_data},
  128. )
  129. return
  130. playback_status = "resumed"
  131. if post_data.get("IsPaused"):
  132. playback_status = "paused"
  133. elif post_data.get("NotificationType") == "PlaybackStop":
  134. playback_status = "stopped"
  135. return media_obj.scrobble_for_user(
  136. user_id,
  137. source=post_data.get(JELLYFIN_POST_KEYS.get("SOURCE")),
  138. playback_position_seconds=playback_position_seconds,
  139. status=playback_status,
  140. )
  141. def web_scrobbler_scrobble_media(
  142. youtube_id: str, user_id: int, status: str = "started"
  143. ) -> Optional[Scrobble]:
  144. video = Video.get_from_youtube_id(youtube_id)
  145. return video.scrobble_for_user(user_id, status, source="Web Scrobbler")
  146. def manual_scrobble_video(
  147. video_id: str, user_id: int, action: Optional[str] = None
  148. ):
  149. if "tt" in video_id:
  150. video = Video.get_from_imdb_id(video_id)
  151. else:
  152. video = Video.get_from_youtube_id(video_id)
  153. # When manually scrobbling, try finding a source from the series
  154. source = "Vrobbler"
  155. if video.tv_series:
  156. source = video.tv_series.preferred_source
  157. scrobble_dict = {
  158. "user_id": user_id,
  159. "timestamp": timezone.now(),
  160. "playback_position_seconds": 0,
  161. "source": source,
  162. }
  163. logger.info(
  164. "[scrobblers] manual video scrobble request received",
  165. extra={
  166. "video_id": video.id,
  167. "user_id": user_id,
  168. "scrobble_dict": scrobble_dict,
  169. "media_type": Scrobble.MediaType.VIDEO,
  170. },
  171. )
  172. scrobble = Scrobble.create_or_update(video, user_id, scrobble_dict)
  173. if action == "stop":
  174. scrobble.stop(force_finish=True)
  175. return scrobble
  176. def manual_scrobble_event(
  177. thesportsdb_id: str, user_id: int, action: Optional[str] = None
  178. ):
  179. data_dict = lookup_event_from_thesportsdb(thesportsdb_id)
  180. event = SportEvent.find_or_create(data_dict)
  181. scrobble_dict = {
  182. "user_id": user_id,
  183. "timestamp": timezone.now(),
  184. "playback_position_seconds": 0,
  185. "source": "TheSportsDB",
  186. }
  187. return Scrobble.create_or_update(event, user_id, scrobble_dict)
  188. def manual_scrobble_video_game(
  189. hltb_id: str, user_id: int, action: Optional[str] = None
  190. ):
  191. game = VideoGame.objects.filter(hltb_id=hltb_id).first()
  192. if not game:
  193. data_dict = lookup_game_from_hltb(hltb_id)
  194. if not data_dict:
  195. logger.info(
  196. "[manual_scrobble_video_game] game not found on hltb",
  197. extra={
  198. "hltb_id": hltb_id,
  199. "user_id": user_id,
  200. "media_type": Scrobble.MediaType.VIDEO_GAME,
  201. },
  202. )
  203. return
  204. game = VideoGame.find_or_create(data_dict)
  205. scrobble_dict = {
  206. "user_id": user_id,
  207. "timestamp": timezone.now(),
  208. "playback_position_seconds": 0,
  209. "source": "Vrobbler",
  210. "long_play_complete": False,
  211. }
  212. logger.info(
  213. "[scrobblers] manual video game scrobble request received",
  214. extra={
  215. "videogame_id": game.id,
  216. "user_id": user_id,
  217. "scrobble_dict": scrobble_dict,
  218. "media_type": Scrobble.MediaType.VIDEO_GAME,
  219. },
  220. )
  221. return Scrobble.create_or_update(game, user_id, scrobble_dict)
  222. def manual_scrobble_book(
  223. title: str, user_id: int, action: Optional[str] = None
  224. ):
  225. book = Book.get_from_google(title)
  226. scrobble_dict = {
  227. "user_id": user_id,
  228. "timestamp": timezone.now(),
  229. "playback_position_seconds": 0,
  230. "source": "Vrobbler",
  231. "long_play_complete": False,
  232. }
  233. logger.info(
  234. "[scrobblers] manual book scrobble request received",
  235. extra={
  236. "book_id": book.id,
  237. "user_id": user_id,
  238. "scrobble_dict": scrobble_dict,
  239. "media_type": Scrobble.MediaType.BOOK,
  240. },
  241. )
  242. return Scrobble.create_or_update(book, user_id, scrobble_dict)
  243. def manual_scrobble_board_game(
  244. bggeek_id: str, user_id: int, action: Optional[str] = None
  245. ) -> Scrobble | None:
  246. boardgame = BoardGame.find_or_create(bggeek_id)
  247. if not boardgame:
  248. logger.error(f"No board game found for ID {bggeek_id}")
  249. return
  250. scrobble_dict = {
  251. "user_id": user_id,
  252. "timestamp": timezone.now(),
  253. "playback_position_seconds": 0,
  254. "source": "Vrobbler",
  255. }
  256. logger.info(
  257. "[vrobbler-scrobble] board game scrobble request received",
  258. extra={
  259. "boardgame_id": boardgame.id,
  260. "user_id": user_id,
  261. "scrobble_dict": scrobble_dict,
  262. "media_type": Scrobble.MediaType.BOARD_GAME,
  263. },
  264. )
  265. return Scrobble.create_or_update(boardgame, user_id, scrobble_dict)
  266. def find_and_enrich_board_game_data(game_dict: dict) -> BoardGame | None:
  267. """TODO Move this to a utility somewhere"""
  268. game = BoardGame.find_or_create(game_dict.get("bggId"))
  269. if game:
  270. game.cooperative = game_dict.get("cooperative", False)
  271. game.highest_wins = game_dict.get("highestWins", True)
  272. game.no_points = game_dict.get("noPoints", False)
  273. game.uses_teams = game_dict.get("useTeams", False)
  274. if not game.rating:
  275. game.rating = game_dict.get("rating") / 10
  276. game.save()
  277. if game_dict.get("designers"):
  278. for designer_name in game_dict.get("designers", "").split(", "):
  279. BoardGameDesigner.objects.get_or_create(name=designer_name)
  280. return game
  281. def email_scrobble_board_game(
  282. bgstat_data: dict[str, Any], user_id: int
  283. ) -> list[Scrobble]:
  284. game_list: list = bgstat_data.get("games", [])
  285. if not game_list:
  286. logger.info(
  287. "No game data from BG Stats, not scrobbling",
  288. extra={"bgstat_data": bgstat_data},
  289. )
  290. return []
  291. player_dict = {}
  292. for player in bgstat_data.get("players", []):
  293. if player.get("isAnonymous"):
  294. person, _created = Person.objects.get_or_create(name="Anonymous")
  295. else:
  296. person, _created = Person.objects.get_or_create(
  297. bgstats_id=player.get("uuid")
  298. )
  299. if not person.name:
  300. person.name = player.get("name", "")
  301. person.save()
  302. player_dict[player.get("id")] = person
  303. base_games = {}
  304. expansions = {}
  305. log_data = {}
  306. for game in game_list:
  307. logger.info(f"Finding and enriching {game.get('name')}")
  308. enriched_game = find_and_enrich_board_game_data(game)
  309. if game.get("isBaseGame"):
  310. base_games[game.get("id")] = enriched_game
  311. elif game.get("isExpansion"):
  312. expansions[game.get("id")] = enriched_game
  313. locations = {}
  314. for location_dict in bgstat_data.get("locations", []):
  315. location, _created = BoardGameLocation.objects.get_or_create(
  316. bgstats_id=location_dict.get("uuid")
  317. )
  318. update_fields = []
  319. if not location.name:
  320. location.name = location_dict.get("name")
  321. update_fields.append("name")
  322. geoloc = GeoLocation.objects.filter(
  323. title__icontains=location.name
  324. ).first()
  325. if geoloc:
  326. location.geo_location = geoloc
  327. update_fields.append("geo_location")
  328. if update_fields:
  329. location.save(update_fields=update_fields)
  330. locations[location_dict.get("id")] = location
  331. scrobbles_created = []
  332. for play_dict in bgstat_data.get("plays", []):
  333. log_data["expansion_ids"] = []
  334. try:
  335. base_game = base_games[play_dict.get("gameRefId")]
  336. except KeyError:
  337. try:
  338. base_game = expansions[play_dict.get("gameRefId")]
  339. except KeyError:
  340. print(play_dict)
  341. logger.info(
  342. "Skipping scrobble of play, can't find game",
  343. extra={"play_dict": play_dict},
  344. )
  345. continue
  346. for eplay in play_dict.get("expansionPlays", []):
  347. expansion = expansions[eplay.get("gameRefId")]
  348. expansion.expansion_for_boardgame = base_game
  349. expansion.save()
  350. log_data["expansion_ids"].append(expansion.id)
  351. if play_dict.get("locationRefId", False):
  352. log_data["location_id"] = locations[
  353. play_dict.get("locationRefId")
  354. ].id
  355. if play_dict.get("rounds", False):
  356. log_data["rounds"] = play_dict.get("rounds")
  357. if play_dict.get("board", False):
  358. log_data["board"] = play_dict.get("board")
  359. log_data["players"] = []
  360. for score_dict in play_dict.get("playerScores", []):
  361. log_data["players"].append(
  362. {
  363. "person_id": player_dict[score_dict.get("playerRefId")].id,
  364. "new": score_dict.get("newPlayer"),
  365. "win": score_dict.get("winner"),
  366. "score": score_dict.get("score"),
  367. "rank": score_dict.get("rank"),
  368. "seat_order": score_dict.get("seatOrder"),
  369. "role": score_dict.get("role"),
  370. }
  371. )
  372. start = parse(play_dict.get("playDate"))
  373. if play_dict.get("durationMin") > 0:
  374. duration_seconds = play_dict.get("durationMin") * 60
  375. else:
  376. duration_seconds = base_game.run_time_seconds
  377. stop = start + timedelta(seconds=duration_seconds)
  378. scrobble_dict = {
  379. "user_id": user_id,
  380. "timestamp": start,
  381. "playback_position_seconds": duration_seconds,
  382. "source": "BG Stats",
  383. "log": log_data,
  384. }
  385. print(scrobble_dict)
  386. scrobble = Scrobble.create_or_update(base_game, user_id, scrobble_dict)
  387. scrobble.stop_timestamp = stop
  388. scrobble.in_progress = False
  389. scrobble.played_to_completion = True
  390. scrobble.save()
  391. scrobbles_created.append(scrobble)
  392. return scrobbles_created
  393. def manual_scrobble_from_url(
  394. url: str, user_id: int, action: Optional[str] = None
  395. ) -> Scrobble:
  396. """We have scrobblable media URLs, and then any other webpages that
  397. we want to scrobble as a media type in and of itself. This checks whether
  398. we know about the content type, and routes it to the appropriate media
  399. scrobbler. Otherwise, return nothing."""
  400. content_key = ""
  401. domain = extract_domain(url)
  402. for key, content_urls in SCROBBLE_CONTENT_URLS.items():
  403. for content_url in content_urls:
  404. if domain in content_url:
  405. content_key = key
  406. item_id = None
  407. if not content_key:
  408. content_key = "-w"
  409. item_id = url
  410. # Try generic search for any URL with digit-based IDs
  411. if not item_id:
  412. try:
  413. item_id = re.findall(r"\d+", url)[0]
  414. except IndexError:
  415. pass
  416. if content_key == "-i" and "v=" in url:
  417. item_id = url.split("v=")[1].split("&")[0]
  418. elif content_key == "-i" and "title/tt" in url:
  419. item_id = "tt" + str(item_id)
  420. scrobble_fn = MANUAL_SCROBBLE_FNS[content_key]
  421. return eval(scrobble_fn)(item_id, user_id, action=action)
  422. def todoist_scrobble_task_finish(
  423. todoist_task: dict, user_id: int, timestamp: datetime
  424. ) -> Optional[Scrobble]:
  425. scrobble = Scrobble.objects.filter(
  426. user_id=user_id,
  427. log__todoist_id=todoist_task.get("todoist_id"),
  428. in_progress=True,
  429. played_to_completion=False,
  430. ).first()
  431. if not scrobble:
  432. logger.info(
  433. "[todoist_scrobble_task_finish] todoist webhook finish called on missing task"
  434. )
  435. return
  436. scrobble.stop(timestamp=timestamp, force_finish=True)
  437. return scrobble
def todoist_scrobble_update_task(
    todoist_note: dict, user_id: int
) -> Optional[Scrobble]:
    """Attach a Todoist note/comment to the matching in-progress task scrobble.

    Returns the updated scrobble, or None when no in-progress scrobble
    matches the note's task.
    """
    scrobble = Scrobble.objects.filter(
        in_progress=True,
        user_id=user_id,
        log__todoist_id=todoist_note.get("task_id"),
    ).first()
    if not scrobble:
        logger.info(
            "[todoist_scrobble_update_task] no task found",
            extra={
                "todoist_note": todoist_note,
                "user_id": user_id,
                "media_type": Scrobble.MediaType.TASK,
            },
        )
        return

    # Notes are stored on the scrobble log as a dict keyed by note ID.
    # NOTE(review): the filter above matches on "task_id" but the note is
    # keyed by "todoist_id" here — confirm both keys exist on the webhook
    # payload and that this asymmetry is intentional.
    existing_notes = scrobble.log.get("notes", {})
    existing_notes[todoist_note.get("todoist_id")] = todoist_note.get("notes")
    scrobble.log["notes"] = existing_notes
    scrobble.save(update_fields=["log"])
    logger.info(
        "[todoist_scrobble_update_task] todoist note added",
        extra={
            "todoist_note": todoist_note,
            "user_id": user_id,
            "media_type": Scrobble.MediaType.TASK,
        },
    )
    return scrobble
  469. def todoist_scrobble_task(
  470. todoist_task: dict,
  471. user_id: int,
  472. started: bool = False,
  473. stopped: bool = False,
  474. user_context_list: list[str] = [],
  475. ) -> Scrobble:
  476. title = get_title_from_labels(
  477. todoist_task.get("todoist_label_list", []), user_context_list
  478. )
  479. task = Task.find_or_create(title)
  480. timestamp = pendulum.parse(todoist_task.get("updated_at", timezone.now()))
  481. in_progress_scrobble = Scrobble.objects.filter(
  482. user_id=user_id,
  483. in_progress=True,
  484. log__todoist_id=todoist_task.get("todoist_id"),
  485. task=task,
  486. ).last()
  487. if not in_progress_scrobble and stopped:
  488. logger.info(
  489. "[todoist_scrobble_task] cannot stop already stopped task",
  490. extra={
  491. "todoist_type": todoist_task["todoist_type"],
  492. "todoist_event": todoist_task["todoist_event"],
  493. "todoist_id": todoist_task["todoist_id"],
  494. },
  495. )
  496. return
  497. if in_progress_scrobble and started:
  498. logger.info(
  499. "[todoist_scrobble_task] cannot start already started task",
  500. extra={
  501. "todoist_type": todoist_task["todoist_type"],
  502. "todoist_event": todoist_task["todoist_event"],
  503. "todoist_id": todoist_task["todoist_id"],
  504. },
  505. )
  506. return in_progress_scrobble
  507. # Finish an in-progress scrobble
  508. if in_progress_scrobble and stopped:
  509. logger.info(
  510. "[todoist_scrobble_task] finishing",
  511. extra={
  512. "todoist_type": todoist_task["todoist_type"],
  513. "todoist_event": todoist_task["todoist_event"],
  514. "todoist_id": todoist_task["todoist_id"],
  515. },
  516. )
  517. return todoist_scrobble_task_finish(todoist_task, user_id, timestamp)
  518. # Default to create new scrobble "if not in_progress_scrobble and in_progress_in_todoist"
  519. # TODO Should use updated_at from TOdoist, but parsing isn't working
  520. scrobble_dict = {
  521. "user_id": user_id,
  522. "timestamp": timestamp,
  523. "playback_position_seconds": 0,
  524. "source": "Todoist",
  525. "log": todoist_task,
  526. }
  527. logger.info(
  528. "[todoist_scrobble_task] creating",
  529. extra={
  530. "task_id": task.id,
  531. "user_id": user_id,
  532. "scrobble_dict": scrobble_dict,
  533. "media_type": Scrobble.MediaType.TASK,
  534. },
  535. )
  536. scrobble = Scrobble.create_or_update(task, user_id, scrobble_dict)
  537. return scrobble
  538. def emacs_scrobble_update_task(
  539. emacs_id: str, emacs_notes: dict, user_id: int
  540. ) -> Optional[Scrobble]:
  541. scrobble = Scrobble.objects.filter(
  542. in_progress=True,
  543. user_id=user_id,
  544. log__source_id=emacs_id,
  545. log__source="orgmode",
  546. ).first()
  547. if not scrobble:
  548. logger.info(
  549. "[emacs_scrobble_update_task] no task found",
  550. extra={
  551. "emacs_notes": emacs_notes,
  552. "user_id": user_id,
  553. "media_type": Scrobble.MediaType.TASK,
  554. },
  555. )
  556. return
  557. notes_updated = False
  558. for note in emacs_notes:
  559. existing_note_ts = [
  560. n.get("timestamp") for n in scrobble.log.get("notes", [])
  561. ]
  562. if not scrobble.log.get('notes"'):
  563. scrobble.log["notes"] = []
  564. if note.get("timestamp") not in existing_note_ts:
  565. scrobble.log["notes"].append(
  566. {note.get("timestamp"): note.get("content")}
  567. )
  568. notes_updated = True
  569. if notes_updated:
  570. scrobble.save(update_fields=["log"])
  571. logger.info(
  572. "[emacs_scrobble_update_task] emacs note added",
  573. extra={
  574. "emacs_note": emacs_notes,
  575. "user_id": user_id,
  576. "media_type": Scrobble.MediaType.TASK,
  577. },
  578. )
  579. return scrobble
  580. def emacs_scrobble_task(
  581. task_data: dict,
  582. user_id: int,
  583. started: bool = False,
  584. stopped: bool = False,
  585. user_context_list: list[str] = [],
  586. ) -> Scrobble | None:
  587. source_id = task_data.get("source_id")
  588. title = get_title_from_labels(
  589. task_data.get("labels", []), user_context_list
  590. )
  591. task = Task.find_or_create(title)
  592. timestamp = pendulum.parse(task_data.get("updated_at", timezone.now()))
  593. in_progress_scrobble = Scrobble.objects.filter(
  594. user_id=user_id,
  595. in_progress=True,
  596. log__source_id=source_id,
  597. log__source="orgmode",
  598. task=task,
  599. ).last()
  600. if not in_progress_scrobble and stopped:
  601. logger.info(
  602. "[emacs_scrobble_task] cannot stop already stopped task",
  603. extra={
  604. "emacs_id": source_id,
  605. },
  606. )
  607. return
  608. if in_progress_scrobble and started:
  609. logger.info(
  610. "[emacs_scrobble_task] cannot start already started task",
  611. extra={
  612. "emacs_id": source_id,
  613. },
  614. )
  615. return in_progress_scrobble
  616. # Finish an in-progress scrobble
  617. if in_progress_scrobble and stopped:
  618. logger.info(
  619. "[emacs_scrobble_task] finishing",
  620. extra={
  621. "emacs_id": source_id,
  622. },
  623. )
  624. in_progress_scrobble.stop(timestamp=timestamp, force_finish=True)
  625. return in_progress_scrobble
  626. if in_progress_scrobble:
  627. return in_progress_scrobble
  628. notes = task_data.pop("notes")
  629. if notes:
  630. task_data["notes"] = []
  631. for note in notes:
  632. task_data["notes"].append(
  633. {note.get("timestamp"): note.get("content")}
  634. )
  635. scrobble_dict = {
  636. "user_id": user_id,
  637. "timestamp": timestamp,
  638. "playback_position_seconds": 0,
  639. "source": "Org-mode",
  640. "log": task_data,
  641. }
  642. logger.info(
  643. "[emacs_scrobble_task] creating",
  644. extra={
  645. "task_id": task.id,
  646. "user_id": user_id,
  647. "scrobble_dict": scrobble_dict,
  648. "media_type": Scrobble.MediaType.TASK,
  649. },
  650. )
  651. scrobble = Scrobble.create_or_update(task, user_id, scrobble_dict)
  652. return scrobble
def manual_scrobble_task(url: str, user_id: int, action: Optional[str] = None):
    """Scrobble a task from a pasted URL (currently only Todoist links).

    Returns the created/updated scrobble.
    """
    # The first run of digits in the URL is treated as the task's source ID.
    source_id = re.findall(r"\d+", url)[0]

    if "todoist" in url:
        source = "Todoist"
        title = "Generic Todoist task"
        # Todoist URLs end in a slug like ".../buy-milk-12345"; rebuild a
        # readable description from the slug words.
        description = " ".join(url.split("/")[-1].split("-")[:-1]).capitalize()

    # NOTE(review): `source`, `title` and `description` are only bound inside
    # the todoist branch above, so any non-Todoist URL raises NameError here —
    # confirm callers only pass Todoist URLs, or add a guard/default.
    task = Task.find_or_create(title)

    scrobble_dict = {
        "user_id": user_id,
        "timestamp": timezone.now(),
        "playback_position_seconds": 0,
        "source": source,
        "log": {"description": description, "source_id": source_id},
    }
    logger.info(
        "[vrobbler-scrobble] webpage scrobble request received",
        extra={
            "task_id": task.id,
            "user_id": user_id,
            "scrobble_dict": scrobble_dict,
            "media_type": Scrobble.MediaType.WEBPAGE,
        },
    )
    scrobble = Scrobble.create_or_update(task, user_id, scrobble_dict)
    return scrobble
  678. def manual_scrobble_webpage(
  679. url: str, user_id: int, action: Optional[str] = None
  680. ):
  681. webpage = WebPage.find_or_create({"url": url})
  682. scrobble_dict = {
  683. "user_id": user_id,
  684. "timestamp": timezone.now(),
  685. "playback_position_seconds": 0,
  686. "source": "Vrobbler",
  687. }
  688. logger.info(
  689. "[vrobbler-scrobble] webpage scrobble request received",
  690. extra={
  691. "webpage_id": webpage.id,
  692. "user_id": user_id,
  693. "scrobble_dict": scrobble_dict,
  694. "media_type": Scrobble.MediaType.WEBPAGE,
  695. },
  696. )
  697. scrobble = Scrobble.create_or_update(webpage, user_id, scrobble_dict)
  698. # possibly async this?
  699. scrobble.push_to_archivebox()
  700. return scrobble
  701. def gpslogger_scrobble_location(data_dict: dict, user_id: int) -> Scrobble:
  702. location = GeoLocation.find_or_create(data_dict)
  703. timestamp = pendulum.parse(data_dict.get("time", timezone.now()))
  704. extra_data = {
  705. "user_id": user_id,
  706. "timestamp": timestamp,
  707. "source": "GPSLogger",
  708. "media_type": Scrobble.MediaType.GEO_LOCATION,
  709. }
  710. scrobble = Scrobble.create_or_update_location(
  711. location,
  712. extra_data,
  713. user_id,
  714. )
  715. provider = LOCATION_PROVIDERS[data_dict.get("prov")]
  716. if "gps_updates" not in scrobble.log.keys():
  717. scrobble.log["gps_updates"] = []
  718. scrobble.log["gps_updates"].append(
  719. {
  720. "timestamp": data_dict.get("time"),
  721. "position_provider": provider,
  722. }
  723. )
  724. if scrobble.timestamp:
  725. scrobble.playback_position_seconds = (
  726. timezone.now() - scrobble.timestamp
  727. ).seconds
  728. scrobble.save(update_fields=["log", "playback_position_seconds"])
  729. logger.info(
  730. "[gpslogger_webhook] gpslogger scrobble request received",
  731. extra={
  732. "scrobble_id": scrobble.id,
  733. "provider": provider,
  734. "user_id": user_id,
  735. "timestamp": extra_data.get("timestamp"),
  736. "raw_timestamp": data_dict.get("time"),
  737. "media_type": Scrobble.MediaType.GEO_LOCATION,
  738. },
  739. )
  740. return scrobble
def web_scrobbler_scrobble_video_or_song(
    data_dict: dict, user_id: Optional[int]
) -> Scrobble:
    """Scrobble a Web Scrobbler payload as a known track, else as a video.

    NOTE(review): this function appears unfinished and cannot currently run
    to completion — see the inline notes on `episode` and the video lookup.
    """
    # We're not going to create music tracks, because the only time
    # we'd hit this is if we're listening to a concert or something.
    artist_name = data_dict.get("artist")
    track_name = data_dict.get("track")
    tracks = Track.objects.filter(
        artist__name=data_dict.get("artist"), title=data_dict.get("track")
    )
    if tracks.count() > 1:
        logger.warning(
            "Multiple tracks found for Web Scrobbler",
            extra={"artist": artist_name, "track": track_name},
        )
    track = tracks.first()
    # No track found, create a Video
    if not track:
        # NOTE(review): the return value is discarded and no argument is
        # passed; Video.get_from_youtube_id presumably needs an ID and the
        # result should be kept — confirm intent.
        Video.get_from_youtube_id()
    # Now we run off a scrobble
    mopidy_data = {
        "user_id": user_id,
        "timestamp": timezone.now(),
        "playback_position_seconds": data_dict.get("playback_time_ticks"),
        "source": "Mopidy",
        "mopidy_status": data_dict.get("status"),
    }
    logger.info(
        "[scrobblers] webhook mopidy scrobble request received",
        extra={
            # NOTE(review): `episode` is never defined in this function, so
            # this line raises NameError at runtime — likely leftover from a
            # copy/paste of the mopidy podcast path; confirm and fix.
            "episode_id": episode.id if episode else None,
            "user_id": user_id,
            "scrobble_dict": mopidy_data,
            "media_type": Scrobble.MediaType.PODCAST_EPISODE,
        },
    )
    scrobble = None
    if episode:
        scrobble = Scrobble.create_or_update(episode, user_id, mopidy_data)
    return scrobble
  781. def manual_scrobble_beer(
  782. untappd_id: str, user_id: int, action: Optional[str] = None
  783. ):
  784. beer = Beer.find_or_create(untappd_id)
  785. if not beer:
  786. logger.error(f"No beer found for Untappd ID {untappd_id}")
  787. return
  788. scrobble_dict = {
  789. "user_id": user_id,
  790. "timestamp": timezone.now(),
  791. "playback_position_seconds": 0,
  792. "source": "Vrobbler",
  793. }
  794. logger.info(
  795. "[vrobbler-scrobble] beer scrobble request received",
  796. extra={
  797. "beer_id": beer.id,
  798. "user_id": user_id,
  799. "scrobble_dict": scrobble_dict,
  800. "media_type": Scrobble.MediaType.BEER,
  801. },
  802. )
  803. # TODO Kick out a process to enrich the media here, and in every scrobble event
  804. return Scrobble.create_or_update(beer, user_id, scrobble_dict)
  805. def manual_scrobble_puzzle(
  806. ipdb_id: str, user_id: int, action: Optional[str] = None
  807. ):
  808. puzzle = Puzzle.find_or_create(ipdb_id)
  809. if not puzzle:
  810. logger.error(f"No puzzle found for IPDB ID {ipdb_id}")
  811. return
  812. scrobble_dict = {
  813. "user_id": user_id,
  814. "timestamp": timezone.now(),
  815. "playback_position_seconds": 0,
  816. "source": "Vrobbler",
  817. }
  818. logger.info(
  819. "[vrobbler-scrobble] puzzle scrobble request received",
  820. extra={
  821. "puzzle_id": puzzle.id,
  822. "user_id": user_id,
  823. "scrobble_dict": scrobble_dict,
  824. "media_type": Scrobble.MediaType.PUZZLE,
  825. },
  826. )
  827. # TODO Kick out a process to enrich the media here, and in every scrobble event
  828. return Scrobble.create_or_update(puzzle, user_id, scrobble_dict)
  829. def manual_scrobble_brickset(
  830. brickset_id: str, user_id: int, action: Optional[str] = None
  831. ):
  832. brickset = BrickSet.find_or_create(brickset_id)
  833. if not brickset:
  834. logger.error(f"No brickset found for Brickset ID {brickset_id}")
  835. return
  836. scrobble_dict = {
  837. "user_id": user_id,
  838. "timestamp": timezone.now(),
  839. "playback_position_seconds": 0,
  840. "source": "Vrobbler",
  841. "log": {"serial_scrobble_id": ""},
  842. }
  843. logger.info(
  844. "[vrobbler-scrobble] brickset scrobble request received",
  845. extra={
  846. "brickset_id": brickset.id,
  847. "user_id": user_id,
  848. "scrobble_dict": scrobble_dict,
  849. "media_type": Scrobble.MediaType.BRICKSET,
  850. },
  851. )
  852. # TODO Kick out a process to enrich the media here, and in every scrobble event
  853. # TODO Need to check for past scrobbles and auto populate serial scrobble id if possible
  854. return Scrobble.create_or_update(brickset, user_id, scrobble_dict)