From e642d50c4707a720e76bdc5c8d4e6ccc728eb2d8 Mon Sep 17 00:00:00 2001
From: glax
Date: Tue, 10 Oct 2023 22:40:44 +0200
Subject: [PATCH] #64 Bato

Comment: This website suuuucks to scrape. There are gonna be so many issues

---
 Tranga/MangaConnectors/Bato.cs                  | 207 ++++++++++++++++++
 .../MangaConnectorJsonConverter.cs              |   2 +
 Tranga/Tranga.cs                                |   3 +-
 3 files changed, 211 insertions(+), 1 deletion(-)
 create mode 100644 Tranga/MangaConnectors/Bato.cs

diff --git a/Tranga/MangaConnectors/Bato.cs b/Tranga/MangaConnectors/Bato.cs
new file mode 100644
index 0000000..d62670e
--- /dev/null
+++ b/Tranga/MangaConnectors/Bato.cs
@@ -0,0 +1,207 @@
+using System.Net;
+using System.Text.RegularExpressions;
+using HtmlAgilityPack;
+using Tranga.Jobs;
+
+namespace Tranga.MangaConnectors;
+
+public class Bato : MangaConnector
+{
+    public Bato(GlobalBase clone) : base(clone, "Bato")
+    {
+        this.downloadClient = new HttpDownloadClient(clone, new Dictionary<byte, int>()
+        {
+            {1, 60}
+        });
+    }
+
+    public override Manga[] GetManga(string publicationTitle = "")
+    {
+        Log($"Searching Publications. Term=\"{publicationTitle}\"");
+        string sanitizedTitle = string.Join(' ', Regex.Matches(publicationTitle, "[A-z]*").Where(m => m.Value.Length > 0)).ToLower();
+        string requestUrl = $"https://bato.to/v3x-search?word={sanitizedTitle}&lang=en";
+        DownloadClient.RequestResult requestResult =
+            downloadClient.MakeRequest(requestUrl, 1);
+        if ((int)requestResult.statusCode < 200 || (int)requestResult.statusCode >= 300)
+            return Array.Empty<Manga>();
+
+        if (requestResult.htmlDocument is null)
+        {
+            Log($"Failed to retrieve site");
+            return Array.Empty<Manga>();
+        }
+
+        Manga[] publications = ParsePublicationsFromHtml(requestResult.htmlDocument);
+        Log($"Retrieved {publications.Length} publications. Term=\"{publicationTitle}\"");
+        return publications;
+    }
+
+    public override Manga? GetMangaFromUrl(string url)
+    {
+        DownloadClient.RequestResult requestResult =
+            downloadClient.MakeRequest(url, 1);
+        if ((int)requestResult.statusCode < 200 || (int)requestResult.statusCode >= 300)
+            return null;
+        if (requestResult.htmlDocument is null)
+        {
+            Log($"Failed to retrieve site");
+            return null;
+        }
+        return ParseSinglePublicationFromHtml(requestResult.htmlDocument, url.Split('/')[^1]);
+    }
+
+    private Manga[] ParsePublicationsFromHtml(HtmlDocument document)
+    {
+        HtmlNode mangaList = document.DocumentNode.SelectSingleNode("//div[@data-hk='0-0-2']");
+
+        List<string> urls = mangaList.ChildNodes
+            .Select(node => $"https://bato.to{node.Descendants("div").First().FirstChild.GetAttributeValue("href", "")}").ToList();
+
+        HashSet<Manga> ret = new();
+        foreach (string url in urls)
+        {
+            Manga? manga = GetMangaFromUrl(url);
+            if (manga is not null)
+                ret.Add((Manga)manga);
+        }
+
+        return ret.ToArray();
+    }
+
+    private Manga ParseSinglePublicationFromHtml(HtmlDocument document, string publicationId)
+    {
+        HtmlNode infoNode = document.DocumentNode.SelectSingleNode("/html/body/div/main/div[1]/div[2]");
+
+        string sortName = infoNode.Descendants("h3").First().InnerText;
+        string description = document.DocumentNode
+            .SelectSingleNode("//div[contains(concat(' ',normalize-space(@class),' '),'prose')]").InnerText;
+
+        string[] altTitlesList = infoNode.ChildNodes[1].ChildNodes[2].InnerText.Split('/');
+        int i = 0;
+        Dictionary<string, string> altTitles = altTitlesList.ToDictionary(s => i++.ToString(), s => s);
+
+        string posterUrl = document.DocumentNode.SelectNodes("//img")
+            .First(child => child.GetAttributeValue("data-hk", "") == "0-1-0").GetAttributeValue("src", "").Replace("&amp;", "&");
+        string coverFileNameInCache = SaveCoverImageToCache(posterUrl, 1);
+
+        List<HtmlNode> genreNodes = document.DocumentNode.SelectSingleNode("//b[text()='Genres:']/..").SelectNodes("span").ToList();
+        string[] tags = genreNodes.Select(node => node.FirstChild.InnerText).ToArray();
+
+        List<HtmlNode> authorsNodes = infoNode.ChildNodes[1].ChildNodes[3].Descendants("a").ToList();
+        List<string> authors = authorsNodes.Select(node => node.InnerText).ToList();
+
+        HtmlNode? originalLanguageNode = document.DocumentNode.SelectSingleNode("//span[text()='Tr From']/..");
+        string originalLanguage = originalLanguageNode is not null ? originalLanguageNode.LastChild.InnerText : "";
+
+        if (!int.TryParse(
+                document.DocumentNode.SelectSingleNode("//span[text()='Original Publication:']/..").LastChild.InnerText.Split('-')[0],
+                out int year))
+            year = DateTime.Now.Year;
+
+        string status = document.DocumentNode.SelectSingleNode("//span[text()='Original Publication:']/..")
+            .ChildNodes[2].InnerText;
+
+        Manga manga = new (sortName, authors, description, altTitles, tags, posterUrl, coverFileNameInCache, new Dictionary<string, string>(),
+            year, originalLanguage, status, publicationId);
+        cachedPublications.Add(manga);
+        return manga;
+    }
+
+    public override Chapter[] GetChapters(Manga manga, string language="en")
+    {
+        Log($"Getting chapters {manga}");
+        string requestUrl = $"https://bato.to/title/{manga.publicationId}";
+        // Leaving this in for verification if the page exists
+        DownloadClient.RequestResult requestResult =
+            downloadClient.MakeRequest(requestUrl, 1);
+        if ((int)requestResult.statusCode < 200 || (int)requestResult.statusCode >= 300)
+            return Array.Empty<Chapter>();
+
+        //Return Chapters ordered by Chapter-Number
+        List<Chapter> chapters = ParseChaptersFromHtml(manga, requestUrl);
+        Log($"Got {chapters.Count} chapters. {manga}");
+        return chapters.OrderBy(chapter => Convert.ToSingle(chapter.chapterNumber, numberFormatDecimalPoint)).ToArray();
+    }
+
+    private List<Chapter> ParseChaptersFromHtml(Manga manga, string mangaUrl)
+    {
+        // Using HtmlWeb will include the chapters since they are loaded with js
+        HtmlWeb web = new();
+        HtmlDocument document = web.Load(mangaUrl);
+
+        List<Chapter> ret = new();
+
+        HtmlNode chapterList =
+            document.DocumentNode.SelectSingleNode("/html/body/div/main/div[3]/astro-island/div/div[2]/div/div/astro-slot");
+
+        Regex chapterNumberRex = new(@"Chapter ([0-9\.]+)");
+
+        foreach (HtmlNode chapterInfo in chapterList.SelectNodes("div"))
+        {
+            HtmlNode infoNode = chapterInfo.FirstChild.FirstChild;
+            string fullString = infoNode.InnerText;
+
+            string? volumeNumber = null;
+            string chapterNumber = chapterNumberRex.Match(fullString).Groups[1].Value;
+            string chapterName = chapterNumber;
+            string url = $"https://bato.to{infoNode.GetAttributeValue("href", "")}?load=2";
+            ret.Add(new Chapter(manga, chapterName, volumeNumber, chapterNumber, url));
+        }
+
+        return ret;
+    }
+
+    public override HttpStatusCode DownloadChapter(Chapter chapter, ProgressToken? progressToken = null)
+    {
+        if (progressToken?.cancellationRequested ?? false)
+        {
+            progressToken.Cancel();
+            return HttpStatusCode.RequestTimeout;
+        }
+
+        Manga chapterParentManga = chapter.parentManga;
+        Log($"Retrieving chapter-info {chapter} {chapterParentManga}");
+        string requestUrl = chapter.url;
+        // Leaving this in to check if the page exists
+        DownloadClient.RequestResult requestResult =
+            downloadClient.MakeRequest(requestUrl, 1);
+        if ((int)requestResult.statusCode < 200 || (int)requestResult.statusCode >= 300)
+        {
+            progressToken?.Cancel();
+            return requestResult.statusCode;
+        }
+
+        string[] imageUrls = ParseImageUrlsFromHtml(requestUrl);
+
+        string comicInfoPath = Path.GetTempFileName();
+        File.WriteAllText(comicInfoPath, chapter.GetComicInfoXmlString());
+
+        return DownloadChapterImages(imageUrls, chapter.GetArchiveFilePath(settings.downloadLocation), 1, comicInfoPath, "https://mangakatana.com/", progressToken:progressToken);
+    }
+
+    private string[] ParseImageUrlsFromHtml(string mangaUrl)
+    {
+        DownloadClient.RequestResult requestResult =
+            downloadClient.MakeRequest(mangaUrl, 1);
+        if ((int)requestResult.statusCode < 200 || (int)requestResult.statusCode >= 300)
+        {
+            return Array.Empty<string>();
+        }
+        if (requestResult.htmlDocument is null)
+        {
+            Log($"Failed to retrieve site");
+            return Array.Empty<string>();
+        }
+
+        HtmlDocument document = requestResult.htmlDocument;
+
+        HtmlNode images = document.DocumentNode.SelectNodes("//astro-island").First(node =>
+            node.GetAttributeValue("component-url", "").Contains("/_astro/ImageList."));
+
+        string weirdString = images.OuterHtml;
+        string weirdString2 = Regex.Match(weirdString, @"props=\""(.*)}\""").Groups[1].Value;
+        string[] urls = Regex.Matches(weirdString2, @"https:\/\/[A-z\-0-9\.\?\&\;\=\/]*").Select(m => m.Value.Replace("\\&quot;]", "").Replace("amp;", "")).ToArray();
+
+        return urls;
+    }
+}
\ No newline at end of file
diff --git a/Tranga/MangaConnectors/MangaConnectorJsonConverter.cs b/Tranga/MangaConnectors/MangaConnectorJsonConverter.cs
index 62f389b..c28359f 100644
--- a/Tranga/MangaConnectors/MangaConnectorJsonConverter.cs
+++ b/Tranga/MangaConnectors/MangaConnectorJsonConverter.cs
@@ -34,6 +34,8 @@ public class MangaConnectorJsonConverter : JsonConverter<MangaConnector>
                 return this._connectors.First(c => c is Mangasee);
             case "Mangaworld":
                 return this._connectors.First(c => c is Mangaworld);
+            case "Bato":
+                return this._connectors.First(c => c is Bato);
         }
 
         throw new Exception();
diff --git a/Tranga/Tranga.cs b/Tranga/Tranga.cs
index 23846b8..34b5de6 100644
--- a/Tranga/Tranga.cs
+++ b/Tranga/Tranga.cs
@@ -22,7 +22,8 @@ public partial class Tranga : GlobalBase
             new Mangasee(this),
             new MangaDex(this),
             new MangaKatana(this),
-            new Mangaworld(this)
+            new Mangaworld(this),
+            new Bato(this)
         };
         jobBoss = new(this, this._connectors);
         StartJobBoss();