Merge branch 'movie-web:dev' into patch-1

Screeching Bagel
2024-02-26 00:45:51 -08:00
committed by GitHub
4 changed files with 22 additions and 30 deletions

[File 1/4: dood embed scraper]

@@ -9,7 +9,7 @@ export const doodScraper = makeEmbed({
   name: 'dood',
   rank: 173,
   async scrape(ctx) {
-    const baseUrl = 'https://do0od.com';
+    const baseUrl = 'https://d0000d.com';
     const id = ctx.url.split('/d/')[1] || ctx.url.split('/e/')[1];
@@ -18,18 +18,17 @@ export const doodScraper = makeEmbed({
       baseUrl,
     });
-    const dataForLater = doodData.match(/a\+"\?token=([^"]+)/)?.[1];
+    const dataForLater = doodData.match(/\?token=([^&]+)&expiry=/)?.[1];
     const path = doodData.match(/\$\.get\('\/pass_md5([^']+)/)?.[1];
-    const doodPage = await ctx.proxiedFetcher<string>(`/pass_md5/${path}`, {
+    const doodPage = await ctx.proxiedFetcher<string>(`/pass_md5${path}`, {
       headers: {
-        referer: `${baseUrl}/e/${id}`,
+        Referer: `${baseUrl}/e/${id}`,
       },
       method: 'GET',
       baseUrl,
     });
-    const downloadURL = `${doodPage}${nanoid()}?token=${dataForLater}${Date.now()}`;
+    const downloadURL = `${doodPage}${nanoid()}?token=${dataForLater}&expiry=${Date.now()}`;
     return {
       stream: [
@@ -43,7 +42,7 @@ export const doodScraper = makeEmbed({
           type: 'mp4',
           url: downloadURL,
           headers: {
-            referer: 'https://do0od.com/',
+            Referer: 'https://d0000d.com/',
          },
        },
      },
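
The regex change and the downloadURL change in this file travel together. The old pattern /a\+"\?token=([^"]+)/ captures everything up to the closing quote, so the literal &expiry= suffix rides along inside the capture and the URL template only appended ${Date.now()}. The new pattern stops at &expiry=, capturing the bare token, so the template has to restore the suffix explicitly. A minimal sketch, run against a hypothetical page fragment (the inline-JS shape is assumed, not taken from the source):

```ts
// Hypothetical fragment of the embed page's inline JS (shape assumed).
const doodData = `$.get('/pass_md5/abc/xyz'); return a+"?token=abc123&expiry="+Date.now()`;

// Old pattern: captures up to the closing quote, so "&expiry=" is included.
const oldCapture = doodData.match(/a\+"\?token=([^"]+)/)?.[1];
console.log(oldCapture); // "abc123&expiry="

// New pattern: stops at "&expiry=", capturing only the token itself.
const newCapture = doodData.match(/\?token=([^&]+)&expiry=/)?.[1];
console.log(newCapture); // "abc123"

// Which is why the two URL templates differ by exactly the "&expiry=" literal:
const now = Date.now();
console.log(`page?token=${oldCapture}${now}`); // page?token=abc123&expiry=<now>
console.log(`page?token=${newCapture}&expiry=${now}`); // page?token=abc123&expiry=<now>
```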

[File 2/4: goojara getEmbeds]

@@ -10,18 +10,28 @@ export async function getEmbeds(ctx: ScrapeContext, id: string): Promise<EmbedsR
     baseUrl: baseUrl2,
     headers: {
       Referer: baseUrl,
+      cookie: '',
     },
     readHeaders: ['Set-Cookie'],
     method: 'GET',
   });
   const cookies = parseSetCookie(data.headers.get('Set-Cookie') || '');
-  const aGoozCookie = cookies.aGooz.value;
-  const $ = load(data.body);
   const RandomCookieName = data.body.split(`_3chk('`)[1].split(`'`)[0];
   const RandomCookieValue = data.body.split(`_3chk('`)[1].split(`'`)[2];
+  let aGoozCookie = '';
+  let cookie = '';
+  if (cookies && cookies.aGooz && RandomCookieName && RandomCookieValue) {
+    aGoozCookie = cookies.aGooz.value;
+    cookie = makeCookieHeader({
+      aGooz: aGoozCookie,
+      [RandomCookieName]: RandomCookieValue,
+    });
+  }
+  const $ = load(data.body);
   const embedRedirectURLs = $('a')
     .map((index, element) => $(element).attr('href'))
     .get()
@@ -33,10 +43,7 @@ export async function getEmbeds(ctx: ScrapeContext, id: string): Promise<EmbedsR
       ctx.fetcher
         .full(url, {
           headers: {
-            cookie: makeCookieHeader({
-              aGooz: aGoozCookie,
-              [RandomCookieName]: RandomCookieValue,
-            }),
+            cookie,
             Referer: baseUrl2,
           },
           method: 'GET',
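
The new branch guards a crash path: parseSetCookie can return a jar without an aGooz entry (for instance when the response sets no cookies at all), and the old unconditional cookies.aGooz.value read then threw. A minimal sketch of the guarded pattern, with stand-in types since the real parseSetCookie/makeCookieHeader helpers aren't shown in this diff:

```ts
// Stand-in shapes for the helpers referenced in the diff (assumed, not the real imports).
type Cookie = { value: string };
type CookieJar = Record<string, Cookie | undefined>;

// Assumed behaviour: joins name/value pairs into a single Cookie header string.
function makeCookieHeader(pairs: Record<string, string>): string {
  return Object.entries(pairs)
    .map(([name, value]) => `${name}=${value}`)
    .join('; ');
}

function buildCookie(cookies: CookieJar, name?: string, value?: string): string {
  let cookie = '';
  // Mirrors the diff: only read cookies.aGooz.value once everything is known to exist.
  if (cookies.aGooz && name && value) {
    cookie = makeCookieHeader({ aGooz: cookies.aGooz.value, [name]: value });
  }
  return cookie; // '' means "send no cookie header" instead of crashing
}

console.log(buildCookie({ aGooz: { value: 'abc' } }, '_t', '42')); // "aGooz=abc; _t=42"
console.log(buildCookie({}, '_t', '42')); // "" -- previously a TypeError
```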

[File 3/4: goojara sourcerer registration]

@@ -21,7 +21,7 @@ async function universalScraper(ctx: ShowScrapeContext | MovieScrapeContext): Pr
 export const goojaraScraper = makeSourcerer({
   id: 'goojara',
-  name: 'goojara',
+  name: 'Goojara',
   rank: 225,
   flags: [],
   scrapeShow: universalScraper,

[File 4/4: goojara search and ID scraping]

@@ -49,7 +49,6 @@ export async function searchAndFindMedia(
   });

   const result = results.find((res: Result) => compareMedia(media, res.title, Number(res.year)));
-
   return result;
 }
@@ -67,19 +66,7 @@ export async function scrapeIds(
     baseUrl,
     headers: headersData,
     method: 'GET',
-  });
-  const $1 = load(data);
-  const dataId = $1('#seon').attr('data-id');
-  if (!dataId) throw new NotFoundError('Not found');
-  data = await ctx.fetcher<string>(`/xhrc.php`, {
-    baseUrl,
-    headers: headersData,
-    method: 'POST',
-    body: new URLSearchParams({ s: media.season.number.toString(), t: dataId }),
-  });
+    query: { s: media.season.number.toString() },
+  });
   let episodeId = '';
@@ -89,7 +76,6 @@ export async function scrapeIds(
   $2('.seho').each((index, element) => {
     // Extracting the episode number as a string
     const episodeNumber = $2(element).find('.seep .sea').text().trim();
-
     // Comparing with the desired episode number as a string
     if (parseInt(episodeNumber, 10) === media.episode.number) {
       const href = $2(element).find('.snfo h1 a').attr('href');
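
The /xhrc.php hunk collapses a two-request flow (GET the page, scrape a data-id out of it, POST the id and season back) into a single GET that carries the season in the fetcher's query option. A sketch of the before/after request shapes, assuming query URL-encodes into the query string; the fetcher, URLs, and markup here are illustrative, not the project's real ones:

```ts
// Illustrative stand-in for the project's fetcher: appends `query` to the URL.
async function fetcher(
  path: string,
  opts: { query?: Record<string, string>; method?: string; body?: URLSearchParams } = {},
): Promise<string> {
  const url = new URL(path, 'https://example.invalid');
  for (const [k, v] of Object.entries(opts.query ?? {})) url.searchParams.set(k, v);
  const res = await fetch(url, { method: opts.method ?? 'GET', body: opts.body });
  return res.text();
}

const season = 2;

// Before: scrape-then-POST round trip.
async function oldFlow(showPath: string): Promise<string> {
  const page = await fetcher(showPath);
  const dataId = /data-id="([^"]+)"/.exec(page)?.[1]; // stands in for $1('#seon').attr('data-id')
  if (!dataId) throw new Error('Not found');
  return fetcher('/xhrc.php', {
    method: 'POST',
    body: new URLSearchParams({ s: season.toString(), t: dataId }),
  });
}

// After: one GET; query: { s: '2' } expands to ...?s=2.
async function newFlow(showPath: string): Promise<string> {
  return fetcher(showPath, { query: { s: season.toString() } });
}
```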