scrobblers.py 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106
  1. import logging
  2. import re
  3. from datetime import datetime, timedelta
  4. from typing import Any, Optional
  5. import pendulum
  6. import pytz
  7. from beers.models import Beer
  8. from boardgames.models import BoardGame, BoardGameDesigner, BoardGameLocation
  9. from books.constants import READCOMICSONLINE_URL
  10. from books.models import Book, BookLogData, BookPageLogData
  11. from books.utils import parse_readcomicsonline_uri
  12. from bricksets.models import BrickSet
  13. from dateutil.parser import parse
  14. from django.utils import timezone
  15. from locations.constants import LOCATION_PROVIDERS
  16. from locations.models import GeoLocation
  17. from music.constants import JELLYFIN_POST_KEYS, MOPIDY_POST_KEYS
  18. from music.models import Track
  19. from people.models import Person
  20. from podcasts.models import PodcastEpisode
  21. from podcasts.utils import parse_mopidy_uri
  22. from profiles.models import UserProfile
  23. from puzzles.models import Puzzle
  24. from scrobbles.constants import (
  25. JELLYFIN_AUDIO_ITEM_TYPES,
  26. MANUAL_SCROBBLE_FNS,
  27. SCROBBLE_CONTENT_URLS,
  28. )
  29. from scrobbles.models import Scrobble
  30. from scrobbles.notifications import ScrobbleNtfyNotification
  31. from scrobbles.utils import convert_to_seconds, extract_domain
  32. from sports.models import SportEvent
  33. from sports.thesportsdb import lookup_event_from_thesportsdb
  34. from tasks.models import Task
  35. from tasks.utils import get_title_from_labels
  36. from videogames.howlongtobeat import lookup_game_from_hltb
  37. from videogames.models import VideoGame
  38. from videos.models import Video
  39. from webpages.models import WebPage
  40. logger = logging.getLogger(__name__)
  41. def mopidy_scrobble_media(post_data: dict, user_id: int) -> Scrobble:
  42. media_type = Scrobble.MediaType.TRACK
  43. if "podcast" in post_data.get("mopidy_uri", ""):
  44. media_type = Scrobble.MediaType.PODCAST_EPISODE
  45. logger.info(
  46. "[mopidy_webhook] called",
  47. extra={
  48. "user_id": user_id,
  49. "post_data": post_data,
  50. "media_type": media_type,
  51. },
  52. )
  53. if media_type == Scrobble.MediaType.PODCAST_EPISODE:
  54. parsed_data = parse_mopidy_uri(post_data.get("mopidy_uri", ""))
  55. if not parsed_data:
  56. logger.warning("Tried to scrobble podcast but no uri found", extra={"post_data": post_data})
  57. return Scrobble()
  58. media_obj = PodcastEpisode.find_or_create(**parsed_data)
  59. else:
  60. media_obj = Track.find_or_create(
  61. title=post_data.get("name", ""),
  62. artist_name=post_data.get("artist", ""),
  63. album_name=post_data.get("album", ""),
  64. run_time_seconds=post_data.get("run_time", 900000),
  65. )
  66. log = {}
  67. try:
  68. log = {"mopidy_source": post_data.get("mopidy_uri", "").split(":")[0]}
  69. except IndexError:
  70. pass
  71. return media_obj.scrobble_for_user(
  72. user_id,
  73. source="Mopidy",
  74. playback_position_seconds=int(
  75. post_data.get(MOPIDY_POST_KEYS.get("PLAYBACK_POSITION_TICKS"), 1)
  76. / 1000
  77. ),
  78. status=post_data.get(MOPIDY_POST_KEYS.get("STATUS"), ""),
  79. log=log,
  80. )
def jellyfin_scrobble_media(
    post_data: dict, user_id: int
) -> Optional[Scrobble]:
    """Scrobble a video or audio track from a Jellyfin webhook payload.

    Returns None when the payload is unusable: a PlaybackProgress event
    with a zero position, or no matching media object found.
    """
    # Audio item types become TRACK scrobbles; everything else is VIDEO.
    media_type = Scrobble.MediaType.VIDEO
    if post_data.pop("ItemType", "") in JELLYFIN_AUDIO_ITEM_TYPES:
        media_type = Scrobble.MediaType.TRACK

    null_position_on_progress = (
        post_data.get("PlaybackPosition") == "00:00:00"
        and post_data.get("NotificationType") == "PlaybackProgress"
    )

    # Jellyfin has some race conditions with its webhooks, these hacks fix some of them
    if null_position_on_progress:
        logger.info(
            "[jellyfin_scrobble_media] no playback position tick, aborting",
            extra={"post_data": post_data},
        )
        return

    # NOTE(review): `timestamp` is parsed but never used below — possibly
    # kept only for the side effect of failing fast on a malformed payload;
    # confirm before removing.
    timestamp = parse(
        post_data.get(JELLYFIN_POST_KEYS.get("TIMESTAMP"), "")
    ).replace(tzinfo=pytz.utc)

    # Dividing ticks by 10,000,000 yields seconds (Jellyfin ticks appear to
    # be 100ns units — TODO confirm against Jellyfin docs).
    playback_position_seconds = int(
        post_data.get(JELLYFIN_POST_KEYS.get("PLAYBACK_POSITION_TICKS"), 1)
        / 10000000
    )

    if media_type == Scrobble.MediaType.VIDEO:
        media_obj = Video.get_from_imdb_id(
            post_data.get("Provider_imdb", "").replace("tt", "")
        )
    else:
        media_obj = Track.find_or_create(
            title=post_data.get("Name", ""),
            artist_name=post_data.get("Artist", ""),
            album_name=post_data.get("Album", ""),
            run_time_seconds=convert_to_seconds(
                post_data.get("RunTime", 900000)
            ),
        )
        # A hack because we don't worry about updating music ... we either finish it or we don't
        playback_position_seconds = 0

    if not media_obj:
        logger.info(
            "[jellyfin_scrobble_media] no video found from POST data",
            extra={"post_data": post_data},
        )
        return

    # Map Jellyfin notification state onto our scrobble status vocabulary.
    playback_status = "resumed"
    if post_data.get("IsPaused"):
        playback_status = "paused"
    elif post_data.get("NotificationType") == "PlaybackStop":
        playback_status = "stopped"

    return media_obj.scrobble_for_user(
        user_id,
        source=post_data.get(JELLYFIN_POST_KEYS.get("SOURCE")),
        playback_position_seconds=playback_position_seconds,
        status=playback_status,
    )
  137. def web_scrobbler_scrobble_media(
  138. youtube_id: str, user_id: int, status: str = "started"
  139. ) -> Optional[Scrobble]:
  140. video = Video.get_from_youtube_id(youtube_id)
  141. return video.scrobble_for_user(user_id, status, source="Web Scrobbler")
  142. def manual_scrobble_video(
  143. video_id: str, user_id: int, action: Optional[str] = None
  144. ):
  145. if "tt" in video_id:
  146. video = Video.get_from_imdb_id(video_id)
  147. else:
  148. video = Video.get_from_youtube_id(video_id)
  149. # When manually scrobbling, try finding a source from the series
  150. source = "Vrobbler"
  151. if video.tv_series:
  152. source = video.tv_series.preferred_source
  153. scrobble_dict = {
  154. "user_id": user_id,
  155. "timestamp": timezone.now(),
  156. "playback_position_seconds": 0,
  157. "source": source,
  158. }
  159. logger.info(
  160. "[scrobblers] manual video scrobble request received",
  161. extra={
  162. "video_id": video.id,
  163. "user_id": user_id,
  164. "scrobble_dict": scrobble_dict,
  165. "media_type": Scrobble.MediaType.VIDEO,
  166. },
  167. )
  168. scrobble = Scrobble.create_or_update(video, user_id, scrobble_dict)
  169. if action == "stop":
  170. scrobble.stop(force_finish=True)
  171. return scrobble
  172. def manual_scrobble_event(
  173. thesportsdb_id: str, user_id: int, action: Optional[str] = None
  174. ):
  175. data_dict = lookup_event_from_thesportsdb(thesportsdb_id)
  176. event = SportEvent.find_or_create(data_dict)
  177. scrobble_dict = {
  178. "user_id": user_id,
  179. "timestamp": timezone.now(),
  180. "playback_position_seconds": 0,
  181. "source": "TheSportsDB",
  182. }
  183. return Scrobble.create_or_update(event, user_id, scrobble_dict)
  184. def manual_scrobble_video_game(
  185. hltb_id: str, user_id: int, action: Optional[str] = None
  186. ):
  187. game = VideoGame.objects.filter(hltb_id=hltb_id).first()
  188. if not game:
  189. data_dict = lookup_game_from_hltb(hltb_id)
  190. if not data_dict:
  191. logger.info(
  192. "[manual_scrobble_video_game] game not found on hltb",
  193. extra={
  194. "hltb_id": hltb_id,
  195. "user_id": user_id,
  196. "media_type": Scrobble.MediaType.VIDEO_GAME,
  197. },
  198. )
  199. return
  200. game = VideoGame.find_or_create(data_dict)
  201. scrobble_dict = {
  202. "user_id": user_id,
  203. "timestamp": timezone.now(),
  204. "playback_position_seconds": 0,
  205. "source": "Vrobbler",
  206. "long_play_complete": False,
  207. }
  208. logger.info(
  209. "[scrobblers] manual video game scrobble request received",
  210. extra={
  211. "videogame_id": game.id,
  212. "user_id": user_id,
  213. "scrobble_dict": scrobble_dict,
  214. "media_type": Scrobble.MediaType.VIDEO_GAME,
  215. },
  216. )
  217. return Scrobble.create_or_update(game, user_id, scrobble_dict)
  218. def manual_scrobble_book(
  219. title: str, user_id: int, action: Optional[str] = None
  220. ):
  221. log = {}
  222. source = "Vrobbler"
  223. page = None
  224. url = None
  225. if READCOMICSONLINE_URL in title:
  226. url = title
  227. title, volume, page = parse_readcomicsonline_uri(title)
  228. if not title:
  229. logger.info(
  230. "[scrobblers] manual book scrobble request failed",
  231. extra={
  232. "title": title,
  233. "user_id": user_id,
  234. "media_type": Scrobble.MediaType.BOOK,
  235. },
  236. )
  237. return
  238. title = f"{title} - Issue {volume}"
  239. if not page:
  240. page = 1
  241. logger.info("[scrobblers] Book page included in scrobble, should update!")
  242. source = READCOMICSONLINE_URL.replace("https://", "")
  243. # TODO: Check for scrobble of this book already and if so, update the page count
  244. book = Book.find_or_create(title, enrich=True)
  245. scrobble_dict = {
  246. "user_id": user_id,
  247. "timestamp": timezone.now(),
  248. "playback_position_seconds": 0,
  249. "source": source,
  250. "long_play_complete": False,
  251. }
  252. logger.info(
  253. "[scrobblers] manual book scrobble request received",
  254. extra={
  255. "book_id": book.id,
  256. "user_id": user_id,
  257. "scrobble_dict": scrobble_dict,
  258. "media_type": Scrobble.MediaType.BOOK,
  259. },
  260. )
  261. scrobble = Scrobble.create_or_update(book, user_id, scrobble_dict, read_log_page=page)
  262. if action == "stop":
  263. if url:
  264. scrobble.log["resume_url"] = url
  265. scrobble.log["restart_url"] = remove_last_part(url)
  266. scrobble.save(update_fields=["log"])
  267. scrobble.stop(force_finish=True)
  268. return scrobble
  269. def manual_scrobble_board_game(
  270. bggeek_id: str, user_id: int, action: Optional[str] = None
  271. ) -> Scrobble | None:
  272. boardgame = BoardGame.find_or_create(bggeek_id)
  273. if not boardgame:
  274. logger.error(f"No board game found for ID {bggeek_id}")
  275. return
  276. scrobble_dict = {
  277. "user_id": user_id,
  278. "timestamp": timezone.now(),
  279. "playback_position_seconds": 0,
  280. "source": "Vrobbler",
  281. }
  282. logger.info(
  283. "[vrobbler-scrobble] board game scrobble request received",
  284. extra={
  285. "boardgame_id": boardgame.id,
  286. "user_id": user_id,
  287. "scrobble_dict": scrobble_dict,
  288. "media_type": Scrobble.MediaType.BOARD_GAME,
  289. },
  290. )
  291. return Scrobble.create_or_update(boardgame, user_id, scrobble_dict)
  292. def find_and_enrich_board_game_data(game_dict: dict) -> BoardGame | None:
  293. """TODO Move this to a utility somewhere"""
  294. game = BoardGame.find_or_create(game_dict.get("bggId"))
  295. if game:
  296. game.cooperative = game_dict.get("cooperative", False)
  297. game.highest_wins = game_dict.get("highestWins", True)
  298. game.no_points = game_dict.get("noPoints", False)
  299. game.uses_teams = game_dict.get("useTeams", False)
  300. game.bgstats_id = game_dict.get("uuid", None)
  301. if not game.rating:
  302. game.rating = game_dict.get("rating") / 10
  303. game.save()
  304. if game_dict.get("designers"):
  305. for designer_name in game_dict.get("designers", "").split(", "):
  306. designer, created = BoardGameDesigner.objects.get_or_create(
  307. name=designer_name
  308. )
  309. game.designers.add(designer.id)
  310. return game
  311. def email_scrobble_board_game(
  312. bgstat_data: dict[str, Any], user_id: int
  313. ) -> list[Scrobble]:
  314. game_list: list = bgstat_data.get("games", [])
  315. if not game_list:
  316. logger.info(
  317. "No game data from BG Stats, not scrobbling",
  318. extra={"bgstat_data": bgstat_data},
  319. )
  320. return []
  321. player_dict = {}
  322. for player in bgstat_data.get("players", []):
  323. if player.get("isAnonymous"):
  324. person, _created = Person.objects.get_or_create(name="Anonymous")
  325. else:
  326. person, _created = Person.objects.get_or_create(
  327. bgstats_id=player.get("uuid")
  328. )
  329. if not person.name:
  330. person.name = player.get("name", "")
  331. person.save()
  332. player_dict[player.get("id")] = person
  333. base_games = {}
  334. expansions = {}
  335. log_data = {}
  336. for game in game_list:
  337. logger.info(f"Finding and enriching {game.get('name')}")
  338. enriched_game = find_and_enrich_board_game_data(game)
  339. if game.get("isBaseGame"):
  340. base_games[game.get("id")] = enriched_game
  341. if game.get("isExpansion"):
  342. expansions[game.get("id")] = enriched_game
  343. locations = {}
  344. for location_dict in bgstat_data.get("locations", []):
  345. location, _created = BoardGameLocation.objects.get_or_create(
  346. bgstats_id=location_dict.get("uuid")
  347. )
  348. update_fields = []
  349. if not location.name:
  350. location.name = location_dict.get("name")
  351. update_fields.append("name")
  352. geoloc = GeoLocation.objects.filter(
  353. title__icontains=location.name
  354. ).first()
  355. if geoloc:
  356. location.geo_location = geoloc
  357. update_fields.append("geo_location")
  358. if update_fields:
  359. location.save(update_fields=update_fields)
  360. locations[location_dict.get("id")] = location
  361. scrobbles_created = []
  362. second = 0
  363. for play_dict in bgstat_data.get("plays", []):
  364. hour = None
  365. minute = None
  366. second = None
  367. if "comments" in play_dict.keys():
  368. for line in play_dict.get("comments", "").split("\n"):
  369. if "Learning to play" in line:
  370. log_data["learning"] = True
  371. if "Start time:" in line:
  372. start_time = line.split(": ")[1]
  373. pieces = start_time.split(":")
  374. hour = int(pieces[0])
  375. minute = int(pieces[1])
  376. try:
  377. second = int(pieces[2])
  378. except IndexError:
  379. second = 0
  380. log_data["notes"] = [play_dict.get("comments")]
  381. log_data["expansion_ids"] = []
  382. try:
  383. base_game = base_games[play_dict.get("gameRefId")]
  384. except KeyError:
  385. try:
  386. base_game = expansions[play_dict.get("gameRefId")]
  387. except KeyError:
  388. logger.info(
  389. "Skipping scrobble of play, can't find game",
  390. extra={"play_dict": play_dict},
  391. )
  392. continue
  393. for eplay in play_dict.get("expansionPlays", []):
  394. expansion = expansions[eplay.get("gameRefId")]
  395. expansion.expansion_for_boardgame = base_game
  396. expansion.save()
  397. log_data["expansion_ids"].append(expansion.id)
  398. if log_data.get("expansion_ids") == []:
  399. log_data.pop("expansion_ids")
  400. if play_dict.get("locationRefId", False):
  401. log_data["location_id"] = locations[
  402. play_dict.get("locationRefId")
  403. ].id
  404. if play_dict.get("rounds", False):
  405. log_data["rounds"] = play_dict.get("rounds")
  406. if play_dict.get("board", False):
  407. log_data["board"] = play_dict.get("board")
  408. log_data["players"] = []
  409. for score_dict in play_dict.get("playerScores", []):
  410. log_data["players"].append(
  411. {
  412. "person_id": player_dict[score_dict.get("playerRefId")].id,
  413. "new": score_dict.get("newPlayer"),
  414. "win": score_dict.get("winner"),
  415. "score": score_dict.get("score"),
  416. "rank": score_dict.get("rank"),
  417. "seat_order": score_dict.get("seatOrder"),
  418. "role": score_dict.get("role"),
  419. }
  420. )
  421. timestamp = parse(play_dict.get("playDate"))
  422. if hour and minute:
  423. logger.info(f"Scrobble playDate has manual start time {timestamp}")
  424. timestamp = timestamp.replace(
  425. hour=hour, minute=minute, second=second or 0
  426. )
  427. logger.info(f"Update to {timestamp}")
  428. profile = UserProfile.objects.filter(user_id=user_id).first()
  429. timestamp = profile.get_timestamp_with_tz(timestamp)
  430. if play_dict.get("durationMin") > 0:
  431. duration_seconds = play_dict.get("durationMin") * 60
  432. else:
  433. duration_seconds = base_game.run_time_seconds
  434. stop_timestamp = timestamp + timedelta(seconds=duration_seconds)
  435. logger.info(f"Creating scrobble for {base_game} at {timestamp}")
  436. scrobble_dict = {
  437. "user_id": user_id,
  438. "timestamp": timestamp,
  439. "playback_position_seconds": duration_seconds,
  440. "source": "BG Stats",
  441. "log": log_data,
  442. }
  443. scrobble = None
  444. if timestamp.year > 2023:
  445. logger.info(
  446. "Scrobbles older than 2024 likely have no time associated just create it"
  447. )
  448. scrobble = Scrobble.objects.filter(
  449. board_game=base_game, user_id=user_id, timestamp=timestamp
  450. ).first()
  451. if scrobble:
  452. logger.info(
  453. "Scrobble already exists, skipping",
  454. extra={"scrobble_dict": scrobble_dict, "user_id": user_id},
  455. )
  456. continue
  457. scrobble = Scrobble.create_or_update(
  458. base_game, user_id, scrobble_dict, skip_in_progress_check=True
  459. )
  460. scrobble.timezone = timestamp.tzinfo.name
  461. scrobble.stop_timestamp = stop_timestamp
  462. scrobble.in_progress = False
  463. scrobble.played_to_completion = True
  464. scrobble.save()
  465. scrobbles_created.append(scrobble)
  466. ScrobbleNtfyNotification(scrobble).send()
  467. return scrobbles_created
  468. def manual_scrobble_from_url(
  469. url: str, user_id: int, action: Optional[str] = None
  470. ) -> Scrobble:
  471. """We have scrobblable media URLs, and then any other webpages that
  472. we want to scrobble as a media type in and of itself. This checks whether
  473. we know about the content type, and routes it to the appropriate media
  474. scrobbler. Otherwise, return nothing."""
  475. content_key = ""
  476. domain = extract_domain(url)
  477. for key, content_urls in SCROBBLE_CONTENT_URLS.items():
  478. for content_url in content_urls:
  479. if domain in content_url:
  480. content_key = key
  481. item_id = None
  482. if not content_key:
  483. content_key = "-w"
  484. item_id = url
  485. # Try generic search for any URL with digit-based IDs
  486. if not item_id:
  487. try:
  488. item_id = re.findall(r"\d+", url)[0]
  489. except IndexError:
  490. pass
  491. if content_key == "-i" and "v=" in url:
  492. item_id = url.split("v=")[1].split("&")[0]
  493. elif content_key == "-c" and "comics" in url:
  494. item_id = url
  495. elif content_key == "-i" and "title/tt" in url:
  496. item_id = "tt" + str(item_id)
  497. scrobble_fn = MANUAL_SCROBBLE_FNS[content_key]
  498. return eval(scrobble_fn)(item_id, user_id, action=action)
  499. def todoist_scrobble_task_finish(
  500. todoist_task: dict, user_id: int, timestamp: datetime
  501. ) -> Optional[Scrobble]:
  502. scrobble = Scrobble.objects.filter(
  503. user_id=user_id,
  504. log__todoist_id=todoist_task.get("todoist_id"),
  505. in_progress=True,
  506. played_to_completion=False,
  507. ).first()
  508. if not scrobble:
  509. logger.info(
  510. "[todoist_scrobble_task_finish] todoist webhook finish called on missing task"
  511. )
  512. return
  513. scrobble.stop(timestamp=timestamp, force_finish=True)
  514. return scrobble
  515. def todoist_scrobble_update_task(
  516. todoist_note: dict, user_id: int
  517. ) -> Optional[Scrobble]:
  518. scrobble = Scrobble.objects.filter(
  519. in_progress=True,
  520. user_id=user_id,
  521. log__todoist_id=todoist_note.get("task_id"),
  522. ).first()
  523. if not scrobble:
  524. logger.info(
  525. "[todoist_scrobble_update_task] no task found",
  526. extra={
  527. "todoist_note": todoist_note,
  528. "user_id": user_id,
  529. "media_type": Scrobble.MediaType.TASK,
  530. },
  531. )
  532. return
  533. if not scrobble.log.get("notes"):
  534. scrobble.log["notes"] = []
  535. scrobble.log["notes"].append(todoist_note.get("notes"))
  536. scrobble.save(update_fields=["log"])
  537. logger.info(
  538. "[todoist_scrobble_update_task] todoist note added",
  539. extra={
  540. "todoist_note": todoist_note,
  541. "user_id": user_id,
  542. "media_type": Scrobble.MediaType.TASK,
  543. },
  544. )
  545. return scrobble
  546. def todoist_scrobble_task(
  547. todoist_task: dict,
  548. user_id: int,
  549. started: bool = False,
  550. stopped: bool = False,
  551. user_context_list: list[str] = [],
  552. ) -> Scrobble:
  553. title = get_title_from_labels(
  554. todoist_task.get("todoist_label_list", []), user_context_list
  555. )
  556. task = Task.find_or_create(title)
  557. timestamp = pendulum.parse(todoist_task.pop("updated_at", timezone.now()))
  558. in_progress_scrobble = Scrobble.objects.filter(
  559. user_id=user_id,
  560. in_progress=True,
  561. log__todoist_id=todoist_task.get("todoist_id"),
  562. task=task,
  563. ).last()
  564. if not in_progress_scrobble and stopped:
  565. logger.info(
  566. "[todoist_scrobble_task] cannot stop already stopped task",
  567. extra={
  568. "todoist_type": todoist_task["todoist_type"],
  569. "todoist_event": todoist_task["todoist_event"],
  570. "todoist_id": todoist_task["todoist_id"],
  571. },
  572. )
  573. return
  574. if in_progress_scrobble and started:
  575. logger.info(
  576. "[todoist_scrobble_task] cannot start already started task",
  577. extra={
  578. "todoist_type": todoist_task["todoist_type"],
  579. "todoist_event": todoist_task["todoist_event"],
  580. "todoist_id": todoist_task["todoist_id"],
  581. },
  582. )
  583. return in_progress_scrobble
  584. # Finish an in-progress scrobble
  585. if in_progress_scrobble and stopped:
  586. logger.info(
  587. "[todoist_scrobble_task] finishing",
  588. extra={
  589. "todoist_type": todoist_task["todoist_type"],
  590. "todoist_event": todoist_task["todoist_event"],
  591. "todoist_id": todoist_task["todoist_id"],
  592. },
  593. )
  594. return todoist_scrobble_task_finish(todoist_task, user_id, timestamp)
  595. todoist_task["title"] = todoist_task.pop("description")
  596. todoist_task["description"] = todoist_task.pop("details")
  597. todoist_task["labels"] = todoist_task.pop("todoist_label_list", [])
  598. todoist_task.pop("todoist_type")
  599. todoist_task.pop("todoist_event")
  600. scrobble_dict = {
  601. "user_id": user_id,
  602. "timestamp": timestamp,
  603. "playback_position_seconds": 0,
  604. "source": "Todoist",
  605. "log": todoist_task,
  606. }
  607. logger.info(
  608. "[todoist_scrobble_task] creating",
  609. extra={
  610. "task_id": task.id,
  611. "user_id": user_id,
  612. "scrobble_dict": scrobble_dict,
  613. "media_type": Scrobble.MediaType.TASK,
  614. },
  615. )
  616. scrobble = Scrobble.create_or_update(task, user_id, scrobble_dict)
  617. return scrobble
  618. def emacs_scrobble_update_task(
  619. emacs_id: str, emacs_notes: dict, user_id: int
  620. ) -> Optional[Scrobble]:
  621. scrobble = Scrobble.objects.filter(
  622. in_progress=True,
  623. user_id=user_id,
  624. log__orgmode_id=emacs_id,
  625. source="Org-mode",
  626. ).first()
  627. if not scrobble:
  628. logger.info(
  629. "[emacs_scrobble_update_task] no task found",
  630. extra={
  631. "emacs_notes": emacs_notes,
  632. "user_id": user_id,
  633. "media_type": Scrobble.MediaType.TASK,
  634. },
  635. )
  636. return
  637. notes_updated = False
  638. for note in emacs_notes:
  639. existing_note_ts = [
  640. n.get("timestamp") for n in scrobble.log.get("notes", [])
  641. ]
  642. if not scrobble.log.get('notes"'):
  643. scrobble.log["notes"] = []
  644. if note.get("timestamp") not in existing_note_ts:
  645. scrobble.log["notes"].append(
  646. {note.get("timestamp"): note.get("content")}
  647. )
  648. notes_updated = True
  649. if notes_updated:
  650. scrobble.save(update_fields=["log"])
  651. logger.info(
  652. "[emacs_scrobble_update_task] emacs note added",
  653. extra={
  654. "emacs_note": emacs_notes,
  655. "user_id": user_id,
  656. "media_type": Scrobble.MediaType.TASK,
  657. },
  658. )
  659. return scrobble
  660. def emacs_scrobble_task(
  661. task_data: dict,
  662. user_id: int,
  663. started: bool = False,
  664. stopped: bool = False,
  665. user_context_list: list[str] = [],
  666. ) -> Scrobble | None:
  667. orgmode_id = task_data.get("source_id")
  668. title = get_title_from_labels(
  669. task_data.get("labels", []), user_context_list
  670. )
  671. task = Task.find_or_create(title)
  672. timestamp = pendulum.parse(task_data.pop("updated_at", timezone.now()))
  673. in_progress_scrobble = Scrobble.objects.filter(
  674. user_id=user_id,
  675. in_progress=True,
  676. log__orgmode_id=orgmode_id,
  677. log__source="orgmode",
  678. task=task,
  679. ).last()
  680. if not in_progress_scrobble and stopped:
  681. logger.info(
  682. "[emacs_scrobble_task] cannot stop already stopped task",
  683. extra={
  684. "orgmode_id": orgmode_id,
  685. },
  686. )
  687. return
  688. if in_progress_scrobble and started:
  689. logger.info(
  690. "[emacs_scrobble_task] cannot start already started task",
  691. extra={
  692. "ormode_id": orgmode_id,
  693. },
  694. )
  695. return in_progress_scrobble
  696. # Finish an in-progress scrobble
  697. if in_progress_scrobble and stopped:
  698. logger.info(
  699. "[emacs_scrobble_task] finishing",
  700. extra={
  701. "orgmode_id": orgmode_id,
  702. },
  703. )
  704. in_progress_scrobble.stop(timestamp=timestamp, force_finish=True)
  705. return in_progress_scrobble
  706. if in_progress_scrobble:
  707. return in_progress_scrobble
  708. notes = task_data.pop("notes")
  709. if notes:
  710. task_data["notes"] = [note.get("content") for note in notes]
  711. task_data["title"] = task_data.pop("description")
  712. task_data["description"] = task_data.pop("body")
  713. task_data["labels"] = task_data.pop("labels")
  714. task_data["orgmode_id"] = task_data.pop("source_id")
  715. task_data["orgmode_state"] = task_data.pop("state")
  716. task_data["orgmode_properties"] = task_data.pop("properties")
  717. task_data["orgmode_drawers"] = task_data.pop("drawers")
  718. task_data["orgmode_timestamps"] = task_data.pop("timestamps")
  719. task_data.pop("source")
  720. scrobble_dict = {
  721. "user_id": user_id,
  722. "timestamp": timestamp,
  723. "playback_position_seconds": 0,
  724. "source": "Org-mode",
  725. "log": task_data,
  726. }
  727. logger.info(
  728. "[emacs_scrobble_task] creating",
  729. extra={
  730. "task_id": task.id,
  731. "user_id": user_id,
  732. "scrobble_dict": scrobble_dict,
  733. "media_type": Scrobble.MediaType.TASK,
  734. },
  735. )
  736. scrobble = Scrobble.create_or_update(task, user_id, scrobble_dict)
  737. return scrobble
  738. def manual_scrobble_task(url: str, user_id: int, action: Optional[str] = None):
  739. source_id = re.findall(r"\d+", url)[0]
  740. if "todoist" in url:
  741. source = "Todoist"
  742. title = "Generic Todoist task"
  743. description = " ".join(url.split("/")[-1].split("-")[:-1]).capitalize()
  744. task = Task.find_or_create(title)
  745. scrobble_dict = {
  746. "user_id": user_id,
  747. "timestamp": timezone.now(),
  748. "playback_position_seconds": 0,
  749. "source": source,
  750. "log": {"description": description, "source_id": source_id},
  751. }
  752. logger.info(
  753. "[vrobbler-scrobble] webpage scrobble request received",
  754. extra={
  755. "task_id": task.id,
  756. "user_id": user_id,
  757. "scrobble_dict": scrobble_dict,
  758. "media_type": Scrobble.MediaType.WEBPAGE,
  759. },
  760. )
  761. scrobble = Scrobble.create_or_update(task, user_id, scrobble_dict)
  762. return scrobble
  763. def manual_scrobble_webpage(
  764. url: str, user_id: int, action: Optional[str] = None
  765. ):
  766. webpage = WebPage.find_or_create({"url": url})
  767. scrobble_dict = {
  768. "user_id": user_id,
  769. "timestamp": timezone.now(),
  770. "playback_position_seconds": 0,
  771. "source": "Vrobbler",
  772. }
  773. logger.info(
  774. "[vrobbler-scrobble] webpage scrobble request received",
  775. extra={
  776. "webpage_id": webpage.id,
  777. "user_id": user_id,
  778. "scrobble_dict": scrobble_dict,
  779. "media_type": Scrobble.MediaType.WEBPAGE,
  780. },
  781. )
  782. scrobble = Scrobble.create_or_update(webpage, user_id, scrobble_dict)
  783. if action == "stop":
  784. scrobble.stop(force_finish=True)
  785. else:
  786. # possibly async this?
  787. scrobble.push_to_archivebox()
  788. return scrobble
def gpslogger_scrobble_location(data_dict: dict, user_id: int) -> Scrobble:
    """Record a GPSLogger position update as a geo-location scrobble.

    Each update is appended to the scrobble's log and the scrobble's
    playback position is advanced to seconds elapsed since it started.
    """
    location = GeoLocation.find_or_create(data_dict)
    # NOTE(review): pendulum.parse expects a string; the timezone.now()
    # fallback would pass a datetime — presumably "time" is always present
    # in GPSLogger payloads; confirm.
    timestamp = pendulum.parse(data_dict.get("time", timezone.now()))
    extra_data = {
        "user_id": user_id,
        "timestamp": timestamp,
        "source": "GPSLogger",
        "media_type": Scrobble.MediaType.GEO_LOCATION,
    }
    scrobble = Scrobble.create_or_update_location(
        location,
        extra_data,
        user_id,
    )
    # NOTE(review): raises KeyError on an unknown provider code — confirm
    # LOCATION_PROVIDERS covers every value GPSLogger can send.
    provider = LOCATION_PROVIDERS[data_dict.get("prov")]

    if "gps_updates" not in scrobble.log.keys():
        scrobble.log["gps_updates"] = []
    scrobble.log["gps_updates"].append(
        {
            "timestamp": data_dict.get("time"),
            "position_provider": provider,
        }
    )
    if scrobble.timestamp:
        # Elapsed wall-clock time since the scrobble began.
        scrobble.playback_position_seconds = (
            timezone.now() - scrobble.timestamp
        ).seconds
    scrobble.save(update_fields=["log", "playback_position_seconds"])
    logger.info(
        "[gpslogger_webhook] gpslogger scrobble request received",
        extra={
            "scrobble_id": scrobble.id,
            "provider": provider,
            "user_id": user_id,
            "timestamp": extra_data.get("timestamp"),
            "raw_timestamp": data_dict.get("time"),
            "media_type": Scrobble.MediaType.GEO_LOCATION,
        },
    )
    return scrobble
def web_scrobbler_scrobble_video_or_song(
    data_dict: dict, user_id: Optional[int]
) -> Scrobble:
    """Handle a Web Scrobbler extension payload for a video or song.

    NOTE(review): this function appears to be unfinished — ``episode`` is
    never assigned (NameError at runtime), and the strings/media types
    below still say Mopidy/podcast, apparently copy-pasted from a Mopidy
    handler. Flagged inline; needs the author's intent to fix.
    """
    # We're not going to create music tracks, because the only time
    # we'd hit this is if we're listening to a concert or something.
    artist_name = data_dict.get("artist")
    track_name = data_dict.get("track")
    tracks = Track.objects.filter(
        artist__name=data_dict.get("artist"), title=data_dict.get("track")
    )
    if tracks.count() > 1:
        # Ambiguous match: warn, then fall through to the first hit.
        logger.warning(
            "Multiple tracks found for Web Scrobbler",
            extra={"artist": artist_name, "track": track_name},
        )
    track = tracks.first()

    # No track found, create a Video
    # NOTE(review): called with no arguments and the return value is
    # discarded — presumably this should build a Video from a YouTube ID
    # in data_dict and be used below; confirm against Video's API.
    if not track:
        Video.get_from_youtube_id()

    # Now we run off a scrobble
    # NOTE(review): dict name, "source": "Mopidy" and "mopidy_status"
    # look copy-pasted from the Mopidy handler; verify intended source.
    mopidy_data = {
        "user_id": user_id,
        "timestamp": timezone.now(),
        "playback_position_seconds": data_dict.get("playback_time_ticks"),
        "source": "Mopidy",
        "mopidy_status": data_dict.get("status"),
    }

    logger.info(
        "[scrobblers] webhook mopidy scrobble request received",
        extra={
            # NOTE(review): `episode` is not defined anywhere in this
            # function — this line raises NameError when executed.
            "episode_id": episode.id if episode else None,
            "user_id": user_id,
            "scrobble_dict": mopidy_data,
            "media_type": Scrobble.MediaType.PODCAST_EPISODE,
        },
    )
    scrobble = None
    # NOTE(review): same undefined `episode`; `track` computed above is
    # never used past the existence check.
    if episode:
        scrobble = Scrobble.create_or_update(episode, user_id, mopidy_data)
    return scrobble
  869. def manual_scrobble_beer(
  870. untappd_id: str, user_id: int, action: Optional[str] = None
  871. ):
  872. beer = Beer.find_or_create(untappd_id)
  873. if not beer:
  874. logger.error(f"No beer found for Untappd ID {untappd_id}")
  875. return
  876. scrobble_dict = {
  877. "user_id": user_id,
  878. "timestamp": timezone.now(),
  879. "playback_position_seconds": 0,
  880. "source": "Vrobbler",
  881. }
  882. logger.info(
  883. "[vrobbler-scrobble] beer scrobble request received",
  884. extra={
  885. "beer_id": beer.id,
  886. "user_id": user_id,
  887. "scrobble_dict": scrobble_dict,
  888. "media_type": Scrobble.MediaType.BEER,
  889. },
  890. )
  891. # TODO Kick out a process to enrich the media here, and in every scrobble event
  892. return Scrobble.create_or_update(beer, user_id, scrobble_dict)
  893. def manual_scrobble_puzzle(
  894. ipdb_id: str, user_id: int, action: Optional[str] = None
  895. ):
  896. puzzle = Puzzle.find_or_create(ipdb_id)
  897. if not puzzle:
  898. logger.error(f"No puzzle found for IPDB ID {ipdb_id}")
  899. return
  900. scrobble_dict = {
  901. "user_id": user_id,
  902. "timestamp": timezone.now(),
  903. "playback_position_seconds": 0,
  904. "source": "Vrobbler",
  905. }
  906. logger.info(
  907. "[vrobbler-scrobble] puzzle scrobble request received",
  908. extra={
  909. "puzzle_id": puzzle.id,
  910. "user_id": user_id,
  911. "scrobble_dict": scrobble_dict,
  912. "media_type": Scrobble.MediaType.PUZZLE,
  913. },
  914. )
  915. # TODO Kick out a process to enrich the media here, and in every scrobble event
  916. return Scrobble.create_or_update(puzzle, user_id, scrobble_dict)
  917. def manual_scrobble_brickset(
  918. brickset_id: str, user_id: int, action: Optional[str] = None
  919. ):
  920. brickset = BrickSet.find_or_create(brickset_id)
  921. if not brickset:
  922. logger.error(f"No brickset found for Brickset ID {brickset_id}")
  923. return
  924. scrobble_dict = {
  925. "user_id": user_id,
  926. "timestamp": timezone.now(),
  927. "playback_position_seconds": 0,
  928. "source": "Vrobbler",
  929. "log": {"serial_scrobble_id": ""},
  930. }
  931. logger.info(
  932. "[vrobbler-scrobble] brickset scrobble request received",
  933. extra={
  934. "brickset_id": brickset.id,
  935. "user_id": user_id,
  936. "scrobble_dict": scrobble_dict,
  937. "media_type": Scrobble.MediaType.BRICKSET,
  938. },
  939. )
  940. # TODO Kick out a process to enrich the media here, and in every scrobble event
  941. # TODO Need to check for past scrobbles and auto populate serial scrobble id if possible
  942. return Scrobble.create_or_update(brickset, user_id, scrobble_dict)