# scrobblers.py
  1. import logging
  2. import re
  3. from datetime import datetime, timedelta
  4. from typing import Any, Optional
  5. import pendulum
  6. import pytz
  7. from beers.models import Beer
  8. from boardgames.models import BoardGame, BoardGameDesigner, BoardGameLocation
  9. from books.constants import READCOMICSONLINE_URL
  10. from books.models import Book, BookLogData, BookPageLogData
  11. from books.utils import parse_readcomicsonline_uri
  12. from bricksets.models import BrickSet
  13. from dateutil.parser import parse
  14. from django.utils import timezone
  15. from locations.constants import LOCATION_PROVIDERS
  16. from locations.models import GeoLocation
  17. from music.constants import JELLYFIN_POST_KEYS, MOPIDY_POST_KEYS
  18. from music.models import Track
  19. from people.models import Person
  20. from podcasts.models import PodcastEpisode
  21. from podcasts.utils import parse_mopidy_uri
  22. from profiles.models import UserProfile
  23. from puzzles.models import Puzzle
  24. from scrobbles.constants import (
  25. JELLYFIN_AUDIO_ITEM_TYPES,
  26. MANUAL_SCROBBLE_FNS,
  27. SCROBBLE_CONTENT_URLS,
  28. )
  29. from scrobbles.models import Scrobble
  30. from scrobbles.notifications import ScrobbleNtfyNotification
  31. from scrobbles.utils import (
  32. convert_to_seconds,
  33. extract_domain,
  34. remove_last_part,
  35. next_url_if_exists,
  36. )
  37. from sports.models import SportEvent
  38. from sports.thesportsdb import lookup_event_from_thesportsdb
  39. from tasks.models import Task
  40. from tasks.utils import get_title_from_labels
  41. from videogames.howlongtobeat import lookup_game_from_hltb
  42. from videogames.models import VideoGame
  43. from videos.models import Video
  44. from webpages.models import WebPage
  45. logger = logging.getLogger(__name__)
  46. def mopidy_scrobble_media(post_data: dict, user_id: int) -> Scrobble:
  47. media_type = Scrobble.MediaType.TRACK
  48. if "podcast" in post_data.get("mopidy_uri", ""):
  49. media_type = Scrobble.MediaType.PODCAST_EPISODE
  50. logger.info(
  51. "[mopidy_webhook] called",
  52. extra={
  53. "user_id": user_id,
  54. "post_data": post_data,
  55. "media_type": media_type,
  56. },
  57. )
  58. if media_type == Scrobble.MediaType.PODCAST_EPISODE:
  59. parsed_data = parse_mopidy_uri(post_data.get("mopidy_uri", ""))
  60. if not parsed_data:
  61. logger.warning("Tried to scrobble podcast but no uri found", extra={"post_data": post_data})
  62. return Scrobble()
  63. media_obj = PodcastEpisode.find_or_create(**parsed_data)
  64. else:
  65. media_obj = Track.find_or_create(
  66. title=post_data.get("name", ""),
  67. artist_name=post_data.get("artist", ""),
  68. album_name=post_data.get("album", ""),
  69. run_time_seconds=post_data.get("run_time", 900000),
  70. )
  71. log = {}
  72. try:
  73. log = {"mopidy_source": post_data.get("mopidy_uri", "").split(":")[0]}
  74. except IndexError:
  75. pass
  76. return media_obj.scrobble_for_user(
  77. user_id,
  78. source="Mopidy",
  79. playback_position_seconds=int(
  80. post_data.get(MOPIDY_POST_KEYS.get("PLAYBACK_POSITION_TICKS"), 1)
  81. / 1000
  82. ),
  83. status=post_data.get(MOPIDY_POST_KEYS.get("STATUS"), ""),
  84. log=log,
  85. )
def jellyfin_scrobble_media(
    post_data: dict, user_id: int
) -> Optional[Scrobble]:
    """Scrobble a video or audio track from a Jellyfin webhook payload.

    Returns None (aborting the scrobble) when Jellyfin sends a progress
    event with no playback position, or when no media object is resolved.
    """
    media_type = Scrobble.MediaType.VIDEO
    # pop (not get): ItemType is deliberately removed from post_data.
    if post_data.pop("ItemType", "") in JELLYFIN_AUDIO_ITEM_TYPES:
        media_type = Scrobble.MediaType.TRACK

    null_position_on_progress = (
        post_data.get("PlaybackPosition") == "00:00:00"
        and post_data.get("NotificationType") == "PlaybackProgress"
    )
    # Jellyfin has some race conditions with it's webhooks, these hacks fix some of them
    if null_position_on_progress:
        logger.info(
            "[jellyfin_scrobble_media] no playback position tick, aborting",
            extra={"post_data": post_data},
        )
        return

    # Jellyfin timestamps arrive naive; pin them to UTC.
    # NOTE(review): timestamp is computed but never used below — confirm
    # whether it should be passed along to scrobble_for_user.
    timestamp = parse(
        post_data.get(JELLYFIN_POST_KEYS.get("TIMESTAMP"), "")
    ).replace(tzinfo=pytz.utc)
    # Position ticks are divided by 10,000,000 to yield seconds
    # (presumably 100ns ticks — TODO confirm against Jellyfin docs).
    playback_position_seconds = int(
        post_data.get(JELLYFIN_POST_KEYS.get("PLAYBACK_POSITION_TICKS"), 1)
        / 10000000
    )

    if media_type == Scrobble.MediaType.VIDEO:
        media_obj = Video.get_from_imdb_id(
            post_data.get("Provider_imdb", "").replace("tt", "")
        )
    else:
        media_obj = Track.find_or_create(
            title=post_data.get("Name", ""),
            artist_name=post_data.get("Artist", ""),
            album_name=post_data.get("Album", ""),
            run_time_seconds=convert_to_seconds(
                post_data.get("RunTime", 900000)
            ),
        )
        # A hack because we don't worry about updating music ... we either finish it or we don't
        playback_position_seconds = 0

    if not media_obj:
        logger.info(
            "[jellyfin_scrobble_media] no video found from POST data",
            extra={"post_data": post_data},
        )
        return

    playback_status = "resumed"
    if post_data.get("IsPaused"):
        playback_status = "paused"
    elif post_data.get("NotificationType") == "PlaybackStop":
        playback_status = "stopped"

    return media_obj.scrobble_for_user(
        user_id,
        source=post_data.get(JELLYFIN_POST_KEYS.get("SOURCE")),
        playback_position_seconds=playback_position_seconds,
        status=playback_status,
    )
  142. def web_scrobbler_scrobble_media(
  143. youtube_id: str, user_id: int, status: str = "started"
  144. ) -> Optional[Scrobble]:
  145. video = Video.get_from_youtube_id(youtube_id)
  146. return video.scrobble_for_user(user_id, status, source="Web Scrobbler")
  147. def manual_scrobble_video(
  148. video_id: str, user_id: int, action: Optional[str] = None
  149. ):
  150. if "tt" in video_id:
  151. video = Video.get_from_imdb_id(video_id)
  152. else:
  153. video = Video.get_from_youtube_id(video_id)
  154. # When manually scrobbling, try finding a source from the series
  155. source = "Vrobbler"
  156. if video.tv_series:
  157. source = video.tv_series.preferred_source
  158. scrobble_dict = {
  159. "user_id": user_id,
  160. "timestamp": timezone.now(),
  161. "playback_position_seconds": 0,
  162. "source": source,
  163. }
  164. logger.info(
  165. "[scrobblers] manual video scrobble request received",
  166. extra={
  167. "video_id": video.id,
  168. "user_id": user_id,
  169. "scrobble_dict": scrobble_dict,
  170. "media_type": Scrobble.MediaType.VIDEO,
  171. },
  172. )
  173. scrobble = Scrobble.create_or_update(video, user_id, scrobble_dict)
  174. if action == "stop":
  175. scrobble.stop(force_finish=True)
  176. return scrobble
  177. def manual_scrobble_event(
  178. thesportsdb_id: str, user_id: int, action: Optional[str] = None
  179. ):
  180. data_dict = lookup_event_from_thesportsdb(thesportsdb_id)
  181. event = SportEvent.find_or_create(data_dict)
  182. scrobble_dict = {
  183. "user_id": user_id,
  184. "timestamp": timezone.now(),
  185. "playback_position_seconds": 0,
  186. "source": "TheSportsDB",
  187. }
  188. return Scrobble.create_or_update(event, user_id, scrobble_dict)
  189. def manual_scrobble_video_game(
  190. hltb_id: str, user_id: int, action: Optional[str] = None
  191. ):
  192. game = VideoGame.objects.filter(hltb_id=hltb_id).first()
  193. if not game:
  194. data_dict = lookup_game_from_hltb(hltb_id)
  195. if not data_dict:
  196. logger.info(
  197. "[manual_scrobble_video_game] game not found on hltb",
  198. extra={
  199. "hltb_id": hltb_id,
  200. "user_id": user_id,
  201. "media_type": Scrobble.MediaType.VIDEO_GAME,
  202. },
  203. )
  204. return
  205. game = VideoGame.find_or_create(data_dict)
  206. scrobble_dict = {
  207. "user_id": user_id,
  208. "timestamp": timezone.now(),
  209. "playback_position_seconds": 0,
  210. "source": "Vrobbler",
  211. "long_play_complete": False,
  212. }
  213. logger.info(
  214. "[scrobblers] manual video game scrobble request received",
  215. extra={
  216. "videogame_id": game.id,
  217. "user_id": user_id,
  218. "scrobble_dict": scrobble_dict,
  219. "media_type": Scrobble.MediaType.VIDEO_GAME,
  220. },
  221. )
  222. return Scrobble.create_or_update(game, user_id, scrobble_dict)
  223. def manual_scrobble_book(
  224. title: str, user_id: int, action: Optional[str] = None
  225. ):
  226. log = {}
  227. source = "Vrobbler"
  228. page = None
  229. url = ""
  230. if READCOMICSONLINE_URL in title:
  231. url = title
  232. title, volume, page = parse_readcomicsonline_uri(title)
  233. if not title:
  234. logger.info(
  235. "[scrobblers] manual book scrobble request failed",
  236. extra={
  237. "title": title,
  238. "user_id": user_id,
  239. "media_type": Scrobble.MediaType.BOOK,
  240. },
  241. )
  242. return
  243. title = f"{title} - Issue {volume}"
  244. if not page:
  245. page = 1
  246. logger.info("[scrobblers] Book page included in scrobble, should update!")
  247. source = READCOMICSONLINE_URL.replace("https://", "")
  248. # TODO: Check for scrobble of this book already and if so, update the page count
  249. book = Book.find_or_create(title, url=url, enrich=True)
  250. scrobble_dict = {
  251. "user_id": user_id,
  252. "timestamp": timezone.now(),
  253. "playback_position_seconds": 0,
  254. "source": source,
  255. "long_play_complete": False,
  256. }
  257. logger.info(
  258. "[scrobblers] manual book scrobble request received",
  259. extra={
  260. "book_id": book.id,
  261. "user_id": user_id,
  262. "scrobble_dict": scrobble_dict,
  263. "media_type": Scrobble.MediaType.BOOK,
  264. },
  265. )
  266. scrobble = Scrobble.create_or_update(book, user_id, scrobble_dict, read_log_page=page)
  267. if action == "stop":
  268. if url:
  269. if isinstance(scrobble.log, "BookLogData"):
  270. scrobble.log.resume_url = next_url_if_exists(url)
  271. else:
  272. scrobble.log["resume_url"] = next_url_if_exists(url)
  273. scrobble.save(update_fields=["log"])
  274. scrobble.stop(force_finish=True)
  275. return scrobble
  276. def manual_scrobble_board_game(
  277. bggeek_id: str, user_id: int, action: Optional[str] = None
  278. ) -> Scrobble | None:
  279. boardgame = BoardGame.find_or_create(bggeek_id)
  280. if not boardgame:
  281. logger.error(f"No board game found for ID {bggeek_id}")
  282. return
  283. scrobble_dict = {
  284. "user_id": user_id,
  285. "timestamp": timezone.now(),
  286. "playback_position_seconds": 0,
  287. "source": "Vrobbler",
  288. }
  289. logger.info(
  290. "[vrobbler-scrobble] board game scrobble request received",
  291. extra={
  292. "boardgame_id": boardgame.id,
  293. "user_id": user_id,
  294. "scrobble_dict": scrobble_dict,
  295. "media_type": Scrobble.MediaType.BOARD_GAME,
  296. },
  297. )
  298. return Scrobble.create_or_update(boardgame, user_id, scrobble_dict)
  299. def find_and_enrich_board_game_data(game_dict: dict) -> BoardGame | None:
  300. """TODO Move this to a utility somewhere"""
  301. game = BoardGame.find_or_create(game_dict.get("bggId"))
  302. if game:
  303. game.cooperative = game_dict.get("cooperative", False)
  304. game.highest_wins = game_dict.get("highestWins", True)
  305. game.no_points = game_dict.get("noPoints", False)
  306. game.uses_teams = game_dict.get("useTeams", False)
  307. game.bgstats_id = game_dict.get("uuid", None)
  308. if not game.rating:
  309. game.rating = game_dict.get("rating") / 10
  310. game.save()
  311. if game_dict.get("designers"):
  312. for designer_name in game_dict.get("designers", "").split(", "):
  313. designer, created = BoardGameDesigner.objects.get_or_create(
  314. name=designer_name
  315. )
  316. game.designers.add(designer.id)
  317. return game
def email_scrobble_board_game(
    bgstat_data: dict[str, Any], user_id: int
) -> list[Scrobble]:
    """Import board-game plays from a BG Stats app export and create one
    completed scrobble per play.

    The export is processed in four passes:
      1. players   -> Person records (anonymous players collapse to a
                      single "Anonymous" person), keyed by BG Stats id
      2. games     -> enriched BoardGame records, split into base games
                      and expansions
      3. locations -> BoardGameLocation records, best-effort linked to a
                      GeoLocation matched by name
      4. plays     -> Scrobbles, with per-play metadata gathered into the
                      scrobble's log

    Returns the list of scrobbles created (detected duplicates skipped).
    """
    game_list: list = bgstat_data.get("games", [])
    if not game_list:
        logger.info(
            "No game data from BG Stats, not scrobbling",
            extra={"bgstat_data": bgstat_data},
        )
        return []

    # Pass 1: map BG Stats player ids to Person records.
    player_dict = {}
    for player in bgstat_data.get("players", []):
        if player.get("isAnonymous"):
            person, _created = Person.objects.get_or_create(name="Anonymous")
        else:
            person, _created = Person.objects.get_or_create(
                bgstats_id=player.get("uuid")
            )
        if not person.name:
            person.name = player.get("name", "")
            person.save()
        player_dict[player.get("id")] = person

    # Pass 2: enrich games and index them by BG Stats id.
    base_games = {}
    expansions = {}
    # NOTE(review): log_data is shared across all plays and only partially
    # reset inside the play loop, so keys such as "learning"/"notes" can
    # bleed from one play into the next — confirm whether that's intended.
    log_data = {}
    for game in game_list:
        logger.info(f"Finding and enriching {game.get('name')}")
        enriched_game = find_and_enrich_board_game_data(game)
        if game.get("isBaseGame"):
            base_games[game.get("id")] = enriched_game
        if game.get("isExpansion"):
            expansions[game.get("id")] = enriched_game

    # Pass 3: locations, with best-effort geo-location matching by name.
    locations = {}
    for location_dict in bgstat_data.get("locations", []):
        location, _created = BoardGameLocation.objects.get_or_create(
            bgstats_id=location_dict.get("uuid")
        )
        update_fields = []
        if not location.name:
            location.name = location_dict.get("name")
            update_fields.append("name")
        geoloc = GeoLocation.objects.filter(
            title__icontains=location.name
        ).first()
        if geoloc:
            location.geo_location = geoloc
            update_fields.append("geo_location")
        if update_fields:
            location.save(update_fields=update_fields)
        locations[location_dict.get("id")] = location

    # Pass 4: create a completed scrobble per play.
    scrobbles_created = []
    second = 0
    for play_dict in bgstat_data.get("plays", []):
        # Optional manual start time parsed out of the play's comments.
        hour = None
        minute = None
        second = None
        if "comments" in play_dict.keys():
            for line in play_dict.get("comments", "").split("\n"):
                if "Learning to play" in line:
                    log_data["learning"] = True
                if "Start time:" in line:
                    start_time = line.split(": ")[1]
                    pieces = start_time.split(":")
                    hour = int(pieces[0])
                    minute = int(pieces[1])
                    try:
                        second = int(pieces[2])
                    except IndexError:
                        # "HH:MM" with no seconds component.
                        second = 0
            log_data["notes"] = [play_dict.get("comments")]
        log_data["expansion_ids"] = []
        # Resolve the played game; the play may reference either a base
        # game or an expansion.
        try:
            base_game = base_games[play_dict.get("gameRefId")]
        except KeyError:
            try:
                base_game = expansions[play_dict.get("gameRefId")]
            except KeyError:
                logger.info(
                    "Skipping scrobble of play, can't find game",
                    extra={"play_dict": play_dict},
                )
                continue
        for eplay in play_dict.get("expansionPlays", []):
            expansion = expansions[eplay.get("gameRefId")]
            expansion.expansion_for_boardgame = base_game
            expansion.save()
            log_data["expansion_ids"].append(expansion.id)
        if log_data.get("expansion_ids") == []:
            log_data.pop("expansion_ids")
        if play_dict.get("locationRefId", False):
            log_data["location_id"] = locations[
                play_dict.get("locationRefId")
            ].id
        if play_dict.get("rounds", False):
            log_data["rounds"] = play_dict.get("rounds")
        if play_dict.get("board", False):
            log_data["board"] = play_dict.get("board")
        log_data["players"] = []
        for score_dict in play_dict.get("playerScores", []):
            log_data["players"].append(
                {
                    "person_id": player_dict[score_dict.get("playerRefId")].id,
                    "new": score_dict.get("newPlayer"),
                    "win": score_dict.get("winner"),
                    "score": score_dict.get("score"),
                    "rank": score_dict.get("rank"),
                    "seat_order": score_dict.get("seatOrder"),
                    "role": score_dict.get("role"),
                }
            )
        timestamp = parse(play_dict.get("playDate"))
        if hour and minute:
            logger.info(f"Scrobble playDate has manual start time {timestamp}")
            timestamp = timestamp.replace(
                hour=hour, minute=minute, second=second or 0
            )
            logger.info(f"Update to {timestamp}")
        # Localize the play date to the user's configured timezone.
        profile = UserProfile.objects.filter(user_id=user_id).first()
        timestamp = profile.get_timestamp_with_tz(timestamp)
        if play_dict.get("durationMin") > 0:
            duration_seconds = play_dict.get("durationMin") * 60
        else:
            # No recorded duration: fall back to the game's typical run time.
            duration_seconds = base_game.run_time_seconds
        stop_timestamp = timestamp + timedelta(seconds=duration_seconds)
        logger.info(f"Creating scrobble for {base_game} at {timestamp}")
        scrobble_dict = {
            "user_id": user_id,
            "timestamp": timestamp,
            "playback_position_seconds": duration_seconds,
            "source": "BG Stats",
            "log": log_data,
        }
        scrobble = None
        # De-duplicate only 2024+ plays. NOTE(review): the log message
        # ("older than 2024") reads as the inverse of this condition —
        # confirm which is intended.
        if timestamp.year > 2023:
            logger.info(
                "Scrobbles older than 2024 likely have no time associated just create it"
            )
            scrobble = Scrobble.objects.filter(
                board_game=base_game, user_id=user_id, timestamp=timestamp
            ).first()
        if scrobble:
            logger.info(
                "Scrobble already exists, skipping",
                extra={"scrobble_dict": scrobble_dict, "user_id": user_id},
            )
            continue
        scrobble = Scrobble.create_or_update(
            base_game, user_id, scrobble_dict, skip_in_progress_check=True
        )
        # BG Stats plays are historical, so mark them completed right away.
        scrobble.timezone = timestamp.tzinfo.name
        scrobble.stop_timestamp = stop_timestamp
        scrobble.in_progress = False
        scrobble.played_to_completion = True
        scrobble.save()
        scrobbles_created.append(scrobble)
        ScrobbleNtfyNotification(scrobble).send()
    return scrobbles_created
  475. def manual_scrobble_from_url(
  476. url: str, user_id: int, action: Optional[str] = None
  477. ) -> Scrobble:
  478. """We have scrobblable media URLs, and then any other webpages that
  479. we want to scrobble as a media type in and of itself. This checks whether
  480. we know about the content type, and routes it to the appropriate media
  481. scrobbler. Otherwise, return nothing."""
  482. content_key = ""
  483. domain = extract_domain(url)
  484. for key, content_urls in SCROBBLE_CONTENT_URLS.items():
  485. for content_url in content_urls:
  486. if domain in content_url:
  487. content_key = key
  488. item_id = None
  489. if not content_key:
  490. content_key = "-w"
  491. item_id = url
  492. # Try generic search for any URL with digit-based IDs
  493. if not item_id:
  494. try:
  495. item_id = re.findall(r"\d+", url)[0]
  496. except IndexError:
  497. pass
  498. if content_key == "-i" and "v=" in url:
  499. item_id = url.split("v=")[1].split("&")[0]
  500. elif content_key == "-c" and "comics" in url:
  501. item_id = url
  502. elif content_key == "-i" and "title/tt" in url:
  503. item_id = "tt" + str(item_id)
  504. scrobble_fn = MANUAL_SCROBBLE_FNS[content_key]
  505. return eval(scrobble_fn)(item_id, user_id, action=action)
  506. def todoist_scrobble_task_finish(
  507. todoist_task: dict, user_id: int, timestamp: datetime
  508. ) -> Optional[Scrobble]:
  509. scrobble = Scrobble.objects.filter(
  510. user_id=user_id,
  511. log__todoist_id=todoist_task.get("todoist_id"),
  512. in_progress=True,
  513. played_to_completion=False,
  514. ).first()
  515. if not scrobble:
  516. logger.info(
  517. "[todoist_scrobble_task_finish] todoist webhook finish called on missing task"
  518. )
  519. return
  520. scrobble.stop(timestamp=timestamp, force_finish=True)
  521. return scrobble
  522. def todoist_scrobble_update_task(
  523. todoist_note: dict, user_id: int
  524. ) -> Optional[Scrobble]:
  525. scrobble = Scrobble.objects.filter(
  526. in_progress=True,
  527. user_id=user_id,
  528. log__todoist_id=todoist_note.get("task_id"),
  529. ).first()
  530. if not scrobble:
  531. logger.info(
  532. "[todoist_scrobble_update_task] no task found",
  533. extra={
  534. "todoist_note": todoist_note,
  535. "user_id": user_id,
  536. "media_type": Scrobble.MediaType.TASK,
  537. },
  538. )
  539. return
  540. if not scrobble.log.get("notes"):
  541. scrobble.log["notes"] = []
  542. scrobble.log["notes"].append(todoist_note.get("notes"))
  543. scrobble.save(update_fields=["log"])
  544. logger.info(
  545. "[todoist_scrobble_update_task] todoist note added",
  546. extra={
  547. "todoist_note": todoist_note,
  548. "user_id": user_id,
  549. "media_type": Scrobble.MediaType.TASK,
  550. },
  551. )
  552. return scrobble
def todoist_scrobble_task(
    todoist_task: dict,
    user_id: int,
    started: bool = False,
    stopped: bool = False,
    user_context_list: list[str] = [],  # NOTE(review): mutable default — read-only here, but prefer None
) -> Scrobble:
    """Start or stop a task scrobble from a Todoist webhook payload.

    Behavior by flags and current state:
      * stopped, no in-progress scrobble -> log and return None
      * started, in-progress exists      -> log and return it unchanged
      * stopped, in-progress exists      -> finish via
        todoist_scrobble_task_finish
      * otherwise create a new scrobble whose log is the re-keyed
        todoist_task payload.
    """
    title = get_title_from_labels(
        todoist_task.get("todoist_label_list", []), user_context_list
    )
    task = Task.find_or_create(title)
    # NOTE(review): pendulum.parse expects a string; the timezone.now()
    # default (a datetime) is only hit when "updated_at" is absent — confirm.
    timestamp = pendulum.parse(todoist_task.pop("updated_at", timezone.now()))
    in_progress_scrobble = Scrobble.objects.filter(
        user_id=user_id,
        in_progress=True,
        log__todoist_id=todoist_task.get("todoist_id"),
        task=task,
    ).last()
    if not in_progress_scrobble and stopped:
        logger.info(
            "[todoist_scrobble_task] cannot stop already stopped task",
            extra={
                "todoist_type": todoist_task["todoist_type"],
                "todoist_event": todoist_task["todoist_event"],
                "todoist_id": todoist_task["todoist_id"],
            },
        )
        return
    if in_progress_scrobble and started:
        logger.info(
            "[todoist_scrobble_task] cannot start already started task",
            extra={
                "todoist_type": todoist_task["todoist_type"],
                "todoist_event": todoist_task["todoist_event"],
                "todoist_id": todoist_task["todoist_id"],
            },
        )
        return in_progress_scrobble
    # Finish an in-progress scrobble
    if in_progress_scrobble and stopped:
        logger.info(
            "[todoist_scrobble_task] finishing",
            extra={
                "todoist_type": todoist_task["todoist_type"],
                "todoist_event": todoist_task["todoist_event"],
                "todoist_id": todoist_task["todoist_id"],
            },
        )
        return todoist_scrobble_task_finish(todoist_task, user_id, timestamp)
    # Re-key the payload into our log schema. Order matters here:
    # "description" is read into "title" before being overwritten.
    todoist_task["title"] = todoist_task.pop("description")
    todoist_task["description"] = todoist_task.pop("details")
    todoist_task["labels"] = todoist_task.pop("todoist_label_list", [])
    todoist_task.pop("todoist_type")
    todoist_task.pop("todoist_event")
    scrobble_dict = {
        "user_id": user_id,
        "timestamp": timestamp,
        "playback_position_seconds": 0,
        "source": "Todoist",
        "log": todoist_task,
    }
    logger.info(
        "[todoist_scrobble_task] creating",
        extra={
            "task_id": task.id,
            "user_id": user_id,
            "scrobble_dict": scrobble_dict,
            "media_type": Scrobble.MediaType.TASK,
        },
    )
    scrobble = Scrobble.create_or_update(task, user_id, scrobble_dict)
    return scrobble
  625. def emacs_scrobble_update_task(
  626. emacs_id: str, emacs_notes: dict, user_id: int
  627. ) -> Optional[Scrobble]:
  628. scrobble = Scrobble.objects.filter(
  629. in_progress=True,
  630. user_id=user_id,
  631. log__orgmode_id=emacs_id,
  632. source="Org-mode",
  633. ).first()
  634. if not scrobble:
  635. logger.info(
  636. "[emacs_scrobble_update_task] no task found",
  637. extra={
  638. "emacs_notes": emacs_notes,
  639. "user_id": user_id,
  640. "media_type": Scrobble.MediaType.TASK,
  641. },
  642. )
  643. return
  644. notes_updated = False
  645. for note in emacs_notes:
  646. existing_note_ts = [
  647. n.get("timestamp") for n in scrobble.log.get("notes", [])
  648. ]
  649. if not scrobble.log.get('notes"'):
  650. scrobble.log["notes"] = []
  651. if note.get("timestamp") not in existing_note_ts:
  652. scrobble.log["notes"].append(
  653. {note.get("timestamp"): note.get("content")}
  654. )
  655. notes_updated = True
  656. if notes_updated:
  657. scrobble.save(update_fields=["log"])
  658. logger.info(
  659. "[emacs_scrobble_update_task] emacs note added",
  660. extra={
  661. "emacs_note": emacs_notes,
  662. "user_id": user_id,
  663. "media_type": Scrobble.MediaType.TASK,
  664. },
  665. )
  666. return scrobble
  667. def emacs_scrobble_task(
  668. task_data: dict,
  669. user_id: int,
  670. started: bool = False,
  671. stopped: bool = False,
  672. user_context_list: list[str] = [],
  673. ) -> Scrobble | None:
  674. orgmode_id = task_data.get("source_id")
  675. title = get_title_from_labels(
  676. task_data.get("labels", []), user_context_list
  677. )
  678. task = Task.find_or_create(title)
  679. timestamp = pendulum.parse(task_data.pop("updated_at", timezone.now()))
  680. in_progress_scrobble = Scrobble.objects.filter(
  681. user_id=user_id,
  682. in_progress=True,
  683. log__orgmode_id=orgmode_id,
  684. task=task,
  685. ).last()
  686. if not in_progress_scrobble and stopped:
  687. logger.info(
  688. "[emacs_scrobble_task] cannot stop already stopped task",
  689. extra={
  690. "orgmode_id": orgmode_id,
  691. },
  692. )
  693. return
  694. if in_progress_scrobble and started:
  695. logger.info(
  696. "[emacs_scrobble_task] cannot start already started task",
  697. extra={
  698. "ormode_id": orgmode_id,
  699. },
  700. )
  701. return in_progress_scrobble
  702. # Finish an in-progress scrobble
  703. if in_progress_scrobble and stopped:
  704. logger.info(
  705. "[emacs_scrobble_task] finishing",
  706. extra={
  707. "orgmode_id": orgmode_id,
  708. },
  709. )
  710. in_progress_scrobble.stop(timestamp=timestamp, force_finish=True)
  711. return in_progress_scrobble
  712. if in_progress_scrobble:
  713. return in_progress_scrobble
  714. notes = task_data.pop("notes")
  715. if notes:
  716. task_data["notes"] = [note.get("content") for note in notes]
  717. task_data["title"] = task_data.pop("description")
  718. task_data["description"] = task_data.pop("body")
  719. task_data["labels"] = task_data.pop("labels")
  720. task_data["orgmode_id"] = task_data.pop("source_id")
  721. task_data["orgmode_state"] = task_data.pop("state")
  722. task_data["orgmode_properties"] = task_data.pop("properties")
  723. task_data["orgmode_drawers"] = task_data.pop("drawers")
  724. task_data["orgmode_timestamps"] = task_data.pop("timestamps")
  725. task_data.pop("source")
  726. scrobble_dict = {
  727. "user_id": user_id,
  728. "timestamp": timestamp,
  729. "playback_position_seconds": 0,
  730. "source": "Org-mode",
  731. "log": task_data,
  732. }
  733. logger.info(
  734. "[emacs_scrobble_task] creating",
  735. extra={
  736. "task_id": task.id,
  737. "user_id": user_id,
  738. "scrobble_dict": scrobble_dict,
  739. "media_type": Scrobble.MediaType.TASK,
  740. },
  741. )
  742. scrobble = Scrobble.create_or_update(task, user_id, scrobble_dict)
  743. return scrobble
  744. def manual_scrobble_task(url: str, user_id: int, action: Optional[str] = None):
  745. source_id = re.findall(r"\d+", url)[0]
  746. if "todoist" in url:
  747. source = "Todoist"
  748. title = "Generic Todoist task"
  749. description = " ".join(url.split("/")[-1].split("-")[:-1]).capitalize()
  750. task = Task.find_or_create(title)
  751. scrobble_dict = {
  752. "user_id": user_id,
  753. "timestamp": timezone.now(),
  754. "playback_position_seconds": 0,
  755. "source": source,
  756. "log": {"description": description, "source_id": source_id},
  757. }
  758. logger.info(
  759. "[vrobbler-scrobble] webpage scrobble request received",
  760. extra={
  761. "task_id": task.id,
  762. "user_id": user_id,
  763. "scrobble_dict": scrobble_dict,
  764. "media_type": Scrobble.MediaType.WEBPAGE,
  765. },
  766. )
  767. scrobble = Scrobble.create_or_update(task, user_id, scrobble_dict)
  768. return scrobble
  769. def manual_scrobble_webpage(
  770. url: str, user_id: int, action: Optional[str] = None
  771. ):
  772. webpage = WebPage.find_or_create({"url": url})
  773. scrobble_dict = {
  774. "user_id": user_id,
  775. "timestamp": timezone.now(),
  776. "playback_position_seconds": 0,
  777. "source": "Vrobbler",
  778. }
  779. logger.info(
  780. "[vrobbler-scrobble] webpage scrobble request received",
  781. extra={
  782. "webpage_id": webpage.id,
  783. "user_id": user_id,
  784. "scrobble_dict": scrobble_dict,
  785. "media_type": Scrobble.MediaType.WEBPAGE,
  786. },
  787. )
  788. scrobble = Scrobble.create_or_update(webpage, user_id, scrobble_dict)
  789. if action == "stop":
  790. scrobble.stop(force_finish=True)
  791. else:
  792. # possibly async this?
  793. scrobble.push_to_archivebox()
  794. return scrobble
  795. def gpslogger_scrobble_location(data_dict: dict, user_id: int) -> Scrobble:
  796. location = GeoLocation.find_or_create(data_dict)
  797. timestamp = pendulum.parse(data_dict.get("time", timezone.now()))
  798. extra_data = {
  799. "user_id": user_id,
  800. "timestamp": timestamp,
  801. "source": "GPSLogger",
  802. "media_type": Scrobble.MediaType.GEO_LOCATION,
  803. }
  804. scrobble = Scrobble.create_or_update_location(
  805. location,
  806. extra_data,
  807. user_id,
  808. )
  809. provider = LOCATION_PROVIDERS[data_dict.get("prov")]
  810. if "gps_updates" not in scrobble.log.keys():
  811. scrobble.log["gps_updates"] = []
  812. scrobble.log["gps_updates"].append(
  813. {
  814. "timestamp": data_dict.get("time"),
  815. "position_provider": provider,
  816. }
  817. )
  818. if scrobble.timestamp:
  819. scrobble.playback_position_seconds = (
  820. timezone.now() - scrobble.timestamp
  821. ).seconds
  822. scrobble.save(update_fields=["log", "playback_position_seconds"])
  823. logger.info(
  824. "[gpslogger_webhook] gpslogger scrobble request received",
  825. extra={
  826. "scrobble_id": scrobble.id,
  827. "provider": provider,
  828. "user_id": user_id,
  829. "timestamp": extra_data.get("timestamp"),
  830. "raw_timestamp": data_dict.get("time"),
  831. "media_type": Scrobble.MediaType.GEO_LOCATION,
  832. },
  833. )
  834. return scrobble
def web_scrobbler_scrobble_video_or_song(
    data_dict: dict, user_id: Optional[int]
) -> Scrobble:
    """Handle a Web Scrobbler webhook payload for a video or song.

    Looks up an existing ``Track`` by artist/title from the payload; when
    none matches, the apparent intent is to fall back to creating a Video.

    NOTE(review): this function looks unfinished/broken — confirm intent
    before relying on it:
      * ``Video.get_from_youtube_id()`` is called with no arguments and
        its return value is discarded, so no video is created or kept.
      * ``episode`` is referenced below but never assigned in this
        function, which raises ``NameError`` at runtime.
      * a found ``track`` is never scrobbled.
    """
    # We're not going to create music tracks, because the only time
    # we'd hit this is if we're listening to a concert or something.
    artist_name = data_dict.get("artist")
    track_name = data_dict.get("track")
    tracks = Track.objects.filter(
        artist__name=data_dict.get("artist"), title=data_dict.get("track")
    )
    if tracks.count() > 1:
        # Ambiguous match: log it and fall through with the first result.
        logger.warning(
            "Multiple tracks found for Web Scrobbler",
            extra={"artist": artist_name, "track": track_name},
        )
    track = tracks.first()
    # No track found, create a Video
    if not track:
        # NOTE(review): no arguments passed and return value discarded —
        # presumably should be Video.get_from_youtube_id(<youtube_id>).
        Video.get_from_youtube_id()
    # Now we run off a scrobble
    mopidy_data = {
        "user_id": user_id,
        "timestamp": timezone.now(),
        "playback_position_seconds": data_dict.get("playback_time_ticks"),
        "source": "Mopidy",
        "mopidy_status": data_dict.get("status"),
    }
    logger.info(
        "[scrobblers] webhook mopidy scrobble request received",
        extra={
            # NOTE(review): ``episode`` is undefined here — NameError.
            "episode_id": episode.id if episode else None,
            "user_id": user_id,
            "scrobble_dict": mopidy_data,
            "media_type": Scrobble.MediaType.PODCAST_EPISODE,
        },
    )
    scrobble = None
    if episode:  # NOTE(review): ``episode`` is undefined — NameError.
        scrobble = Scrobble.create_or_update(episode, user_id, mopidy_data)
    return scrobble
  875. def manual_scrobble_beer(
  876. untappd_id: str, user_id: int, action: Optional[str] = None
  877. ):
  878. beer = Beer.find_or_create(untappd_id)
  879. if not beer:
  880. logger.error(f"No beer found for Untappd ID {untappd_id}")
  881. return
  882. scrobble_dict = {
  883. "user_id": user_id,
  884. "timestamp": timezone.now(),
  885. "playback_position_seconds": 0,
  886. "source": "Vrobbler",
  887. }
  888. logger.info(
  889. "[vrobbler-scrobble] beer scrobble request received",
  890. extra={
  891. "beer_id": beer.id,
  892. "user_id": user_id,
  893. "scrobble_dict": scrobble_dict,
  894. "media_type": Scrobble.MediaType.BEER,
  895. },
  896. )
  897. # TODO Kick out a process to enrich the media here, and in every scrobble event
  898. return Scrobble.create_or_update(beer, user_id, scrobble_dict)
  899. def manual_scrobble_puzzle(
  900. ipdb_id: str, user_id: int, action: Optional[str] = None
  901. ):
  902. puzzle = Puzzle.find_or_create(ipdb_id)
  903. if not puzzle:
  904. logger.error(f"No puzzle found for IPDB ID {ipdb_id}")
  905. return
  906. scrobble_dict = {
  907. "user_id": user_id,
  908. "timestamp": timezone.now(),
  909. "playback_position_seconds": 0,
  910. "source": "Vrobbler",
  911. }
  912. logger.info(
  913. "[vrobbler-scrobble] puzzle scrobble request received",
  914. extra={
  915. "puzzle_id": puzzle.id,
  916. "user_id": user_id,
  917. "scrobble_dict": scrobble_dict,
  918. "media_type": Scrobble.MediaType.PUZZLE,
  919. },
  920. )
  921. # TODO Kick out a process to enrich the media here, and in every scrobble event
  922. return Scrobble.create_or_update(puzzle, user_id, scrobble_dict)
  923. def manual_scrobble_brickset(
  924. brickset_id: str, user_id: int, action: Optional[str] = None
  925. ):
  926. brickset = BrickSet.find_or_create(brickset_id)
  927. if not brickset:
  928. logger.error(f"No brickset found for Brickset ID {brickset_id}")
  929. return
  930. scrobble_dict = {
  931. "user_id": user_id,
  932. "timestamp": timezone.now(),
  933. "playback_position_seconds": 0,
  934. "source": "Vrobbler",
  935. "log": {"serial_scrobble_id": ""},
  936. }
  937. logger.info(
  938. "[vrobbler-scrobble] brickset scrobble request received",
  939. extra={
  940. "brickset_id": brickset.id,
  941. "user_id": user_id,
  942. "scrobble_dict": scrobble_dict,
  943. "media_type": Scrobble.MediaType.BRICKSET,
  944. },
  945. )
  946. # TODO Kick out a process to enrich the media here, and in every scrobble event
  947. # TODO Need to check for past scrobbles and auto populate serial scrobble id if possible
  948. return Scrobble.create_or_update(brickset, user_id, scrobble_dict)