Merge pull request #89 from MemeCornucopia/fix-goojara

Fix Goojara Scraping and Capitalization of Name
This commit is contained in:
William Oldham
2024-02-26 08:30:57 +00:00
committed by GitHub
4 changed files with 22 additions and 30 deletions

View File

@@ -9,7 +9,7 @@ export const doodScraper = makeEmbed({
name: 'dood',
rank: 173,
async scrape(ctx) {
const baseUrl = 'https://do0od.com';
const baseUrl = 'https://d0000d.com';
const id = ctx.url.split('/d/')[1] || ctx.url.split('/e/')[1];
@@ -18,18 +18,17 @@ export const doodScraper = makeEmbed({
baseUrl,
});
const dataForLater = doodData.match(/a\+"\?token=([^"]+)/)?.[1];
const dataForLater = doodData.match(/\?token=([^&]+)&expiry=/)?.[1];
const path = doodData.match(/\$\.get\('\/pass_md5([^']+)/)?.[1];
const doodPage = await ctx.proxiedFetcher<string>(`/pass_md5/${path}`, {
const doodPage = await ctx.proxiedFetcher<string>(`/pass_md5${path}`, {
headers: {
referer: `${baseUrl}/e/${id}`,
Referer: `${baseUrl}/e/${id}`,
},
method: 'GET',
baseUrl,
});
const downloadURL = `${doodPage}${nanoid()}?token=${dataForLater}${Date.now()}`;
const downloadURL = `${doodPage}${nanoid()}?token=${dataForLater}&expiry=${Date.now()}`;
return {
stream: [
@@ -43,7 +42,7 @@ export const doodScraper = makeEmbed({
type: 'mp4',
url: downloadURL,
headers: {
referer: 'https://do0od.com/',
Referer: 'https://d0000d.com/',
},
},
},

View File

@@ -10,18 +10,28 @@ export async function getEmbeds(ctx: ScrapeContext, id: string): Promise<EmbedsR
baseUrl: baseUrl2,
headers: {
Referer: baseUrl,
cookie: '',
},
readHeaders: ['Set-Cookie'],
method: 'GET',
});
const cookies = parseSetCookie(data.headers.get('Set-Cookie') || '');
const aGoozCookie = cookies.aGooz.value;
const $ = load(data.body);
const RandomCookieName = data.body.split(`_3chk('`)[1].split(`'`)[0];
const RandomCookieValue = data.body.split(`_3chk('`)[1].split(`'`)[2];
let aGoozCookie = '';
let cookie = '';
if (cookies && cookies.aGooz && RandomCookieName && RandomCookieValue) {
aGoozCookie = cookies.aGooz.value;
cookie = makeCookieHeader({
aGooz: aGoozCookie,
[RandomCookieName]: RandomCookieValue,
});
}
const $ = load(data.body);
const embedRedirectURLs = $('a')
.map((index, element) => $(element).attr('href'))
.get()
@@ -33,10 +43,7 @@ export async function getEmbeds(ctx: ScrapeContext, id: string): Promise<EmbedsR
ctx.fetcher
.full(url, {
headers: {
cookie: makeCookieHeader({
aGooz: aGoozCookie,
[RandomCookieName]: RandomCookieValue,
}),
cookie,
Referer: baseUrl2,
},
method: 'GET',

View File

@@ -21,7 +21,7 @@ async function universalScraper(ctx: ShowScrapeContext | MovieScrapeContext): Pr
export const goojaraScraper = makeSourcerer({
id: 'goojara',
name: 'goojara',
name: 'Goojara',
rank: 225,
flags: [],
scrapeShow: universalScraper,

View File

@@ -49,7 +49,6 @@ export async function searchAndFindMedia(
});
const result = results.find((res: Result) => compareMedia(media, res.title, Number(res.year)));
return result;
}
@@ -67,19 +66,7 @@ export async function scrapeIds(
baseUrl,
headers: headersData,
method: 'GET',
});
const $1 = load(data);
const dataId = $1('#seon').attr('data-id');
if (!dataId) throw new NotFoundError('Not found');
data = await ctx.fetcher<string>(`/xhrc.php`, {
baseUrl,
headers: headersData,
method: 'POST',
body: new URLSearchParams({ s: media.season.number.toString(), t: dataId }),
query: { s: media.season.number.toString() },
});
let episodeId = '';
@@ -89,7 +76,6 @@ export async function scrapeIds(
$2('.seho').each((index, element) => {
// Extracting the episode number as a string
const episodeNumber = $2(element).find('.seep .sea').text().trim();
// Comparing with the desired episode number as a string
if (parseInt(episodeNumber, 10) === media.episode.number) {
const href = $2(element).find('.snfo h1 a').attr('href');