3 Commits

Author | SHA1 | Message | Date
Jorrin | 7cb4686e02 | Merge branch 'dev' into feature/filter-duplicate-embeds | 2024-04-03 22:12:33 +02:00
Jorrin | 3b00d6ffc8 | remove unused flag in filemoon | 2024-04-03 18:37:45 +02:00
Jorrin | 2f4e76747d | filter out duplicate embeds | 2024-04-03 18:34:04 +02:00
101 changed files with 8108 additions and 4041 deletions

View File

@@ -1,7 +0,0 @@
root = true
[*]
end_of_line = lf
insert_final_newline = true
indent_size = 2
indent_style = space

View File

@@ -1,6 +1,4 @@
dist
node_modules
.output
public
# Ignore index due to prettier removing setext headers
*.index.md
.nuxt

View File

@@ -1,6 +1,8 @@
module.exports = {
extends: ['next', 'plugin:prettier/recommended'],
root: true,
extends: '@nuxt/eslint-config',
rules: {
'@next/next/no-img-element': 'off',
},
};
'vue/max-attributes-per-line': 'off',
'vue/multi-word-component-names': 'off'
}
}

View File

@@ -1 +0,0 @@
* text=auto eol=lf

View File

@@ -1 +0,0 @@
* @movie-web/project-leads

View File

@@ -1 +0,0 @@
Please visit the [main document at primary repository](https://github.com/movie-web/movie-web/blob/dev/.github/CODE_OF_CONDUCT.md).

View File

@@ -1 +0,0 @@
Please visit the [main document at primary repository](https://github.com/movie-web/movie-web/blob/dev/.github/CONTRIBUTING.md).

View File

@@ -1,49 +0,0 @@
name: "docs-deploy"
on:
push:
branches:
- master
jobs:
build:
name: Build
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- uses: pnpm/action-setup@v2
with:
version: 8
- name: Setup Node
uses: actions/setup-node@v4
with:
node-version: "20"
cache: pnpm
- name: Install dependencies
run: pnpm install --frozen-lockfile
- name: Build
run: pnpm build
- name: Upload
uses: actions/upload-pages-artifact@v3
with:
path: ./out
deploy:
needs: build
permissions:
pages: write
id-token: write
environment:
name: github-pages
url: ${{ steps.deployment.outputs.page_url }}
runs-on: ubuntu-latest
steps:
- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@v4

View File

@@ -1,32 +0,0 @@
name: Linting and Testing
on:
push:
branches:
- master
pull_request:
jobs:
linting:
name: Run Linters
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
- uses: pnpm/action-setup@v3
with:
version: 8
- name: Install Node.js
uses: actions/setup-node@v4
with:
node-version: 20
cache: 'pnpm'
- name: Install pnpm packages
run: pnpm install
- name: Run ESLint
run: pnpm run lint

5
.docs/.gitignore vendored
View File

@@ -2,11 +2,12 @@ node_modules
*.iml
.idea
*.log*
.nuxt
.vscode
.DS_Store
coverage
dist
sw.*
.env
out
.next
.output
.nuxt

View File

@@ -1,3 +0,0 @@
# Ignore index due to prettier removing setext headers
*.index.md
.github/CODEOWNERS

View File

@@ -1,4 +0,0 @@
{
"trailingComma": "all",
"singleQuote": true
}

View File

@@ -1,21 +0,0 @@
MIT License
Copyright (c) 2023 movie-web
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

17
.docs/app.config.ts Normal file
View File

@@ -0,0 +1,17 @@
export default defineAppConfig({
docus: {
title: '@movie-web/providers',
description: 'For all your media scraping needs',
socials: {
github: 'movie-web/providers',
},
image: '',
aside: {
level: 0,
exclude: [],
},
header: {
logo: false,
},
},
});

View File

@@ -0,0 +1,3 @@
code > span {
white-space: pre;
}

Binary file not shown.

View File

@@ -1,18 +0,0 @@
.logo {
border-radius: 5px;
margin-left: -0.5rem;
padding: 0.5rem;
transition: transform 100ms ease-in-out, background-color 100ms ease-in-out;
}
.logo > img {
height: 1.5rem;
}
.logo:hover {
background-color: rgba(var(--colors-bgLightest));
}
.logo:active {
transform: scale(1.05);
}

View File

@@ -1,11 +0,0 @@
import Link from 'next/link';
import classes from './Logo.module.css';
import logoUrl from '../public/icon-light.png';
export function Logo() {
return (
<Link href="/" className={classes.logo}>
<img src={logoUrl.src} alt="Logo of movie-web" />
</Link>
);
}

51
.docs/content/0.index.md Normal file
View File

@@ -0,0 +1,51 @@
---
title: "@movie-web/providers | For all your media scraping needs"
navigation: false
layout: page
---
::block-hero
---
cta:
- Get Started
- /get-started/introduction
secondary:
- Open on GitHub →
- https://github.com/movie-web/providers
snippet: npm i @movie-web/providers
---
#title
@movie-web/providers
#description
Easily scrape all sorts of media sites for content
::
::card-grid
#title
What's included
#root
:ellipsis
#default
::card{icon="vscode-icons:file-type-light-json"}
#title
Scrape popular streaming websites.
#description
Don't settle for just one media site for your content, use everything that's available.
::
::card{icon="codicon:source-control"}
#title
Multi-platform.
#description
Scrape from browser or server, whichever you prefer.
::
::card{icon="logos:typescript-icon-round"}
#title
Easy to use.
#description
Get started with scraping your favourite media sites with just 5 lines of code. Fully typed of course.
::
::

View File

@@ -1,8 +1,6 @@
---
title: 'Introduction'
---
# Introduction
## What is `@movie-web/providers` ?
## What is `@movie-web/providers`?
`@movie-web/providers` is the soul of [movie-web](https://github.com/movie-web/movie-web). It's a collection of scrapers of various streaming sites. It extracts the raw streams from those sites, so you can watch them without any extra fluff from the original sites.
@@ -13,4 +11,4 @@ We support many different environments, here are a few examples:
- In a native app, scrape in the app itself
- In a backend server, scrape on the server and give the streams to the client to watch.
To find out how to configure the library for your environment, You can read [How to use on X](/essentials/usage-on-x).
To find out how to configure the library for your environment, you can read [How to use on X](../2.essentials/0.usage-on-x.md).

View File

@@ -1,24 +1,30 @@
---
title: 'Quick Start'
---
# Quick start
## Installation
Let's get started with `@movie-web/providers`. First lets install the package.
```sh npm2yarn
::code-group
```bash [NPM]
npm install @movie-web/providers
```
```bash [Yarn]
yarn add @movie-web/providers
```
```bash [PNPM]
pnpm install @movie-web/providers
```
::
## Scrape your first item
To get started with scraping on the **server**, first you have to make an instance of the providers.
<Important>
This snippet will only work on a **server**. For other environments, check out [Usage on X](/essentials/usage-on-x).
</Important>
::alert{type="warning"}
This snippet will only work on a **server**. For other environments, check out [Usage on X](../2.essentials/0.usage-on-x.md).
::
```ts title="index.ts (server)" showLineNumbers
```ts [index.ts (server)]
import { makeProviders, makeStandardFetcher, targets } from '@movie-web/providers';
// this is how the library will make http requests
@@ -36,36 +42,19 @@ const providers = makeProviders({
Perfect. You now have an instance of the providers you can reuse everywhere.
Now let's scrape an item:
```ts title="index.ts (server)" showLineNumbers
import { ScrapeMedia, makeProviders, makeStandardFetcher, targets } from '@movie-web/providers';
const myFetcher = makeStandardFetcher(fetch);
const providers = makeProviders({
fetcher: myFetcher,
target: targets.NATIVE
});
const media: ScrapeMedia = {
```ts [index.ts (server)]
// fetch some data from TMDB
const media = {
type: 'movie',
title: "Oppenheimer",
releaseYear: 2023,
tmdbId: "872585"
};
async function fetchData() {
try {
const output = await providers.runAll({
media: media,
});
console.log("Output:",output)
} catch (error) {
console.error('Error occurred:', error);
}
title: "Hamilton",
releaseYear: 2020,
tmdbId: "556574"
}
fetchData();
const output = await providers.runAll({
media: media
})
```
Now we have our stream in the output variable. (If the output is `null` then nothing could be found.)
To find out how to use the streams, check out [Using streams](/essentials/using-streams).
To find out how to use the streams, check out [Using streams](../2.essentials/4.using-streams.md).

View File

@@ -0,0 +1,5 @@
# Examples
::alert{type="warning"}
There are no examples yet, stay tuned!
::

View File

@@ -2,17 +2,6 @@
title: 'Changelog'
---
# Version 2.3.0
- Fixed RidoMovies search results
- Added Insertunit, SoaperTV, and WarezCDN providers
- Disabled Showbox and VidSrc
# Version 2.2.9
- Fixed VidSrcTo (both Vidplay and Filemoon embeds)
- Added dropload, filelions and vtube embeds to Primewire
- Fixed and enabled Smashystream
- Improved RidoMovies search results
# Version 2.2.8
- Fix package exports for CJS and ESM
- Fixed Mixdrop embed
@@ -102,9 +91,9 @@ title: 'Changelog'
# Version 2.0.0
<Warning>
::alert{type="warning"}
There are breaking changes in this list, make sure to read them thoroughly if you plan on updating.
</Warning>
::
**Development tooling:**
- Added integration test for browser. To make sure the package keeps working in the browser
@@ -125,4 +114,4 @@ There are breaking changes in this list, make sure to read them thoroughly if yo
- Export Fetcher and Stream types with all types related to it
- Providers can now return a list of streams instead of just one.
- Captions now have identifiers returned with them. Just generally useful to have
- New targets and some of them renamed
- New targets and some of them renamed

View File

@@ -0,0 +1,2 @@
icon: ph:shooting-star-fill
navigation.redirect: /get-started/introduction

View File

@@ -24,7 +24,7 @@ const providers = makeProviders({
## Browser client-side
Using the provider package client-side requires a hosted version of simple-proxy.
Read more [about proxy fetchers](/essentials/fetchers#using-fetchers-on-the-browser).
Read more [about proxy fetchers](./2.fetchers.md#using-fetchers-on-the-browser).
```ts
import { makeProviders, makeStandardFetcher, targets } from '@movie-web/providers';
@@ -41,32 +41,27 @@ const providers = makeProviders({
## React native
To use the library in a react native app, you would also need a couple of polyfills to polyfill crypto and base64.
<Steps>
<Steps.Step>
### First install the polyfills:
```sh npm2yarn
npm install @react-native-anywhere/polyfill-base64 react-native-quick-crypto
```
</Steps.Step>
<Steps.Step>
### Add the polyfills to your app:
```ts
// Import in your entry file
import '@react-native-anywhere/polyfill-base64';
```
1. First install the polyfills:
```bash
npm install @react-native-anywhere/polyfill-base64 react-native-quick-crypto
```
And follow the [react-native-quick-crypto documentation](https://github.com/margelo/react-native-quick-crypto) to set up the crypto polyfill.
</Steps.Step>
<Steps.Step>
### Then you can use the library like this:
```ts
import { makeProviders, makeStandardFetcher, targets } from '@movie-web/providers';
2. Add the polyfills to your app:
```ts
// Import in your entry file
import '@react-native-anywhere/polyfill-base64';
```
const providers = makeProviders({
fetcher: makeStandardFetcher(fetch),
target: target.NATIVE,
consistentIpForRequests: true,
})
```
</Steps.Step>
</Steps>
And follow the [react-native-quick-crypto documentation](https://github.com/margelo/react-native-quick-crypto) to set up the crypto polyfill.
3. Then you can use the library like this:
```ts
import { makeProviders, makeStandardFetcher, targets } from '@movie-web/providers';
const providers = makeProviders({
fetcher: makeStandardFetcher(fetch),
target: target.NATIVE,
consistentIpForRequests: true,
})
```

View File

@@ -2,13 +2,13 @@
When creating provider controls, you will immediately be required to choose a target.
<Warning>
::alert{type="warning"}
A target is the device on which the stream will be played.
**Where the scraping is being run has nothing to do with the target**, only where the stream is finally played in the end is significant in choosing a target.
</Warning>
**Where the scraping is run has nothing to do with the target**; only where the stream is ultimately played matters when choosing a target.
::
#### Possible targets
- **`targets.BROWSER`** Stream will be played in a browser with CORS
- **`targets.BROWSER_EXTENSION`** Stream will be played in a browser using the movie-web extension (WIP)
- **`targets.NATIVE`** Stream will be played on a native video player
- **`targets.ANY`** No restrictions for selecting streams, will just give all of them
- **`targets.ANY`** No restrictions for selecting streams, will just give all of them
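
A minimal sketch of choosing one of these targets when building the provider controls, mirroring the `makeProviders` call used elsewhere in these docs (`targets.NATIVE` is only an example choice):

```ts
import { makeProviders, makeStandardFetcher, targets } from '@movie-web/providers';

// The scraping may run anywhere (server, app, browser); the target only
// describes where the stream will ultimately be played.
const providers = makeProviders({
  fetcher: makeStandardFetcher(fetch),
  target: targets.NATIVE, // stream will be played in a native video player
});
```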

View File

@@ -71,4 +71,4 @@ const myFetcher: Fetcher = (url, ops) => {
statusCode: 200,
};
}
```
```

View File

@@ -71,4 +71,4 @@ const providers = buildProviders()
}
})
.build();
```
```

View File

@@ -4,13 +4,13 @@ Streams can sometimes be quite picky on how they can be used. So here is a guide
## Essentials
All streams have the same common parameters :
- `Stream.type` : The type of stream. Either `hls` or `file`
- `Stream.id` : The id of this stream, unique per scraper output.
- `Stream.flags` : A list of flags that apply to this stream. Most people won't need to use it.
- `Stream.captions` : A list of captions/subtitles for this stream.
- `Stream.headers` : Either undefined or a key value object of headers you must set to use the stream.
- `Stream.preferredHeaders` : Either undefined or a key value object of headers you may want to set if you want optimal playback - but not required.
All streams have the same common parameters:
- `Stream.type`: The type of stream. Either `hls` or `file`
- `Stream.id`: The id of this stream, unique per scraper output.
- `Stream.flags`: A list of flags that apply to this stream. Most people won't need to use it.
- `Stream.captions`: A list of captions/subtitles for this stream.
- `Stream.headers`: Either undefined or a key value object of headers you must set to use the stream.
- `Stream.preferredHeaders`: Either undefined or a key value object of headers you may want to set if you want optimal playback - but not required.
Now let's delve deeper into how to watch these streams!
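
As a small, hedged sketch of reading these common fields (assuming the `Stream` type is imported from the package, which the changelog notes is exported):

```ts
import type { Stream } from '@movie-web/providers';

// Sketch: log the fields shared by every stream, regardless of its type.
function describeStream(stream: Stream) {
  console.log(stream.type); // 'hls' or 'file'
  console.log(stream.id); // unique per scraper output
  console.log(stream.flags); // flags that apply to this stream
  console.log(stream.captions.length); // available captions/subtitles
  if (stream.headers) {
    // headers that must be set to use the stream
    console.log(stream.headers);
  }
  if (stream.preferredHeaders) {
    // optional headers for optimal playback
    console.log(stream.preferredHeaders);
  }
}
```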
@@ -22,7 +22,7 @@ These streams have an extra property `Stream.playlist` which contains the m3u8 p
Here is a code sample of how to use HLS streams in web context using hls.js
```html
<script src="https ://cdn.jsdelivr.net/npm/hls.js@1"></script>
<script src="https://cdn.jsdelivr.net/npm/hls.js@1"></script>
<video id="video"></video>
<script>
@@ -39,17 +39,17 @@ Here is a code sample of how to use HLS streams in web context using hls.js
## Streams with type `file`
File streams are quite easy to use, they just return a new property : `Stream.qualities`.
File streams are quite easy to use; they just return a new property: `Stream.qualities`.
This property is a map from quality to stream file. So if you want the 1080p quality, you use `stream["1080"]` to get your stream file. It will return undefined if that quality is absent.
The possibly qualities are : `unknown`, `360`, `480`, `720`, `1080`, `4k`.
The possible qualities are: `unknown`, `360`, `480`, `720`, `1080`, `4k`.
File based streams are always guaranteed to have one quality.
Once you get a streamfile, you have the following parameters :
- `StreamFile.type` : Right now it can only be `mp4`.
- `StreamFile.url` : The URL linking to the video file.
Once you get a streamfile, you have the following parameters:
- `StreamFile.type`: Right now it can only be `mp4`.
- `StreamFile.url`: The URL linking to the video file.
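
A hedged sketch of picking a quality from a file-based stream (the fallback order is only an example, and the quality keys are assumed to be the strings listed above):

```ts
import type { Stream } from '@movie-web/providers';

// Sketch: prefer 1080p, fall back to 720p, then to whatever is available.
function pickFileUrl(stream: Stream): string | undefined {
  if (stream.type !== 'file') return undefined;
  const file = stream.qualities['1080'] ?? stream.qualities['720'] ?? stream.qualities.unknown;
  return file?.url; // direct link to the video file, or undefined
}
```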
Here is a code sample of how to watch a file based stream in a browser :
Here is a code sample of how to watch a file based stream in a browser:
```html
<video id="video"></video>
@@ -72,13 +72,13 @@ If your target is set to `BROWSER`, headers will never be required, as it's not
## Using captions/subtitles
All streams have a list of captions at `Stream.captions`. The structure looks like this :
All streams have a list of captions at `Stream.captions`. The structure looks like this:
```ts
type Caption = {
type : CaptionType; // Language type, either "srt" or "vtt"
id : string; // Unique per stream
url : string; // The URL pointing to the subtitle file
hasCorsRestrictions : boolean; // If true, you will need to proxy it if you're running in a browser
language : string; // Language code of the caption
type: CaptionType; // Language type, either "srt" or "vtt"
id: string; // Unique per stream
url: string; // The URL pointing to the subtitle file
hasCorsRestrictions: boolean; // If true, you will need to proxy it if you're running in a browser
language: string; // Language code of the caption
};
```
```
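
And a hedged sketch of selecting a caption from that list (the `'en'` filter and the proxy URL are purely illustrative):

```ts
import type { Stream } from '@movie-web/providers';

// Sketch: pick an English caption and work out which URL to load it from.
function pickEnglishCaption(stream: Stream) {
  const caption = stream.captions.find((c) => c.language === 'en');
  if (!caption) return undefined;
  // If the file has CORS restrictions and you are in a browser, route it
  // through a proxy of your own (hypothetical URL below).
  const url = caption.hasCorsRestrictions
    ? `https://your-proxy.example/?url=${encodeURIComponent(caption.url)}`
    : caption.url;
  return { url, type: caption.type }; // type is 'srt' or 'vtt'
}
```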

View File

@@ -0,0 +1,3 @@
icon: ph:info-fill
navigation.redirect: /essentials/usage
navigation.title: "Get started"

View File

@@ -1,11 +1,11 @@
# Sources vs embeds
<Warning>
::alert{type="warning"}
This page isn't quite done yet, stay tuned!
</Warning>
::
{/*
<!--
TODO
- How do sources and embeds differ
- How do sources and embeds interact
*/}
-->

View File

@@ -1,12 +1,12 @@
# New providers
<Warning>
::alert{type="warning"}
This page isn't quite done yet, stay tuned!
</Warning>
::
{/*
<!--
TODO
- How to make new sources or embeds
- Ranking
- Link to flags
*/}
-->

View File

@@ -6,5 +6,5 @@ For example, some sources only give back content that has the CORS headers set t
This concept is applied in multiple ways across the library.
## Flag options
- `CORS_ALLOWED` : Headers from the output streams are set to allow any origin.
- `IP_LOCKED` : The streams are locked by IP: requester and watcher must be the same.
- `CORS_ALLOWED`: Headers from the output streams are set to allow any origin.
- `IP_LOCKED`: The streams are locked by IP: requester and watcher must be the same.
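
A minimal sketch of acting on these flags (assuming `flags` and `Stream` are exported from the package entrypoint, as the provider sources import them internally):

```ts
// Assumption: `flags` and `Stream` are exported from the package entrypoint.
import { flags, type Stream } from '@movie-web/providers';

// Sketch: describe how a scraped stream may be used, based on its flags.
function describeFlags(stream: Stream) {
  if (stream.flags.includes(flags.CORS_ALLOWED)) {
    console.log('Can be fetched directly from a browser, any origin is allowed');
  }
  if (stream.flags.includes(flags.IP_LOCKED)) {
    console.log('The IP that scraped the stream must also be the IP that watches it');
  }
}
```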

View File

@@ -0,0 +1,3 @@
icon: ph:atom-fill
navigation.redirect: /in-depth/sources-and-embeds
navigation.title: "In-depth"

View File

@@ -1,16 +1,16 @@
# Development / contributing
<Warning>
::alert{type="warning"}
This page isn't quite done yet, stay tuned!
</Warning>
::
{/*
<!--
TODO
- Development setup
- How to make new sources/embeds (link to the page)
- How to use the fetchers, when to use proxiedFetcher
- How to use the context
*/}
-->
## Testing using the CLI
@@ -27,7 +27,7 @@ Then make sure you've run `npm i` to get all the dependencies.
To run the CLI without needing to learn all the arguments, simply run the following command and go with the flow.
```sh npm2yarn
```sh
npm run cli
```
@@ -35,18 +35,18 @@ npm run cli
For repeatability, it can be useful to specify the arguments one by one.
To see all the arguments, you can run the help command:
```sh
```sh
npm run cli -- -h
```
Then just run it with your arguments, for example:
```sh
```sh
npm run cli -- -sid showbox -tid 556574
```
### Examples
```sh
```sh
# Spirited away - showbox
npm run cli -- -sid showbox -tid 129
@@ -63,10 +63,10 @@ npm run cli -- -sid febbox-mp4 -u URL_HERE
### Fetcher options
The CLI comes with a few built-in fetchers:
- `node-fetch` : Fetch using the "node-fetch" library.
- `native` : Use the new fetch built into Node.JS (undici).
- `browser` : Start up headless chrome, and run the library in that context using a proxied fetcher.
- `node-fetch`: Fetch using the "node-fetch" library.
- `native`: Use the new fetch built into Node.js (undici).
- `browser`: Start up headless Chrome and run the library in that context using a proxied fetcher.
<Warning>
::alert{type="warning"}
The browser fetcher will require you to run `npm run build` before running the CLI. Otherwise you will get outdated results.
</Warning>
::

View File

@@ -0,0 +1,3 @@
icon: ph:aperture-fill
navigation.redirect: /extra-topics/development
navigation.title: "Extra topics"

View File

@@ -6,10 +6,8 @@ You can attach events if you need to know what is going on while it is processin
## Example
```ts
import { ScrapeMedia, targets } from '@movie-web/providers';
// media from TMDB
const media : ScrapeMedia = {
const media = {
type: 'movie',
title: 'Hamilton',
releaseYear: 2020,

View File

@@ -5,10 +5,10 @@ Run a specific source scraper and get its emitted streams.
## Example
```ts
import { ScrapeMedia , SourcererOutput, NotFoundError } from '@movie-web/providers';
import { SourcererOutput, NotFoundError } from '@movie-web/providers';
// media from TMDB
const media : ScrapeMedia = {
const media = {
type: 'movie',
title: 'Hamilton',
releaseYear: 2020,

5
.docs/next-env.d.ts vendored
View File

@@ -1,5 +0,0 @@
/// <reference types="next" />
/// <reference types="next/image-types/global" />
// NOTE: This file should not be edited
// see https://nextjs.org/docs/basic-features/typescript for more information.

View File

@@ -1,10 +0,0 @@
import { guider } from '@neato/guider';
const withGuider = guider({
themeConfig: './theme.config.tsx',
});
export default withGuider({
output: 'export',
basePath: '/providers',
});

21
.docs/nuxt.config.ts Executable file
View File

@@ -0,0 +1,21 @@
export default defineNuxtConfig({
// https://github.com/nuxt-themes/docus
extends: '@nuxt-themes/docus',
css: [
'@/assets/css/main.css',
],
build: {
transpile: [
"chalk"
]
},
modules: [
// https://github.com/nuxt-modules/plausible
'@nuxtjs/plausible',
// https://github.com/nuxt/devtools
'@nuxt/devtools'
]
})

View File

@@ -1,28 +1,22 @@
{
"name": "providers-docs",
"version": "0.2.0",
"version": "0.1.0",
"private": true,
"scripts": {
"dev": "next dev",
"build": "next build",
"start": "next start",
"lint": "next lint",
"lint:fix": "next lint --fix"
"dev": "nuxi dev",
"build": "nuxi build",
"generate": "nuxi generate",
"preview": "nuxi preview",
"lint": "eslint .",
"preinstall": "npx -y only-allow pnpm"
},
"devDependencies": {
"@types/react": "18.2.73",
"eslint": "^8.56.0",
"eslint-config-next": "^14.1.4",
"eslint-config-prettier": "^9.1.0",
"eslint-plugin-prettier": "^5.1.2",
"prettier": "^3.1.1",
"typescript": "5.4.3"
},
"dependencies": {
"@neato/guider": "^0.1.5",
"next": "^14.1.4",
"next-seo": "^6.5.0",
"react": "^18.2.0",
"react-dom": "^18.2.0"
"@nuxt-themes/docus": "^1.13.1",
"@nuxt/devtools": "^1.0.1",
"@nuxt/eslint-config": "^0.1.1",
"@nuxtjs/plausible": "^0.2.1",
"@types/node": "^20.4.0",
"eslint": "^8.44.0",
"nuxt": "^3.6.2"
}
}

View File

@@ -1,3 +0,0 @@
import { createNotFoundPage } from '@neato/guider/client';
export default createNotFoundPage();

View File

@@ -1,4 +0,0 @@
import '@neato/guider/style.css';
import { createGuiderApp } from '@neato/guider/client';
export default createGuiderApp();

View File

@@ -1,3 +0,0 @@
import { createRedirect } from '@neato/guider/client';
export default createRedirect({ to: '/api-reference/makeProviders' });

View File

@@ -1,3 +0,0 @@
import { createRedirect } from '@neato/guider/client';
export default createRedirect({ to: '/essentials/usage-on-x' });

View File

@@ -1,3 +0,0 @@
import { createRedirect } from '@neato/guider/client';
export default createRedirect({ to: '/in-depth/sources-vs-embeds' });

View File

@@ -1,7 +0,0 @@
---
title: 'Examples'
---
<Note>
Coming soon
</Note>

View File

@@ -1,3 +0,0 @@
import { createRedirect } from '@neato/guider/client';
export default createRedirect({ to: '/get-started/introduction' });

View File

@@ -1,3 +0,0 @@
import { createRedirect } from '@neato/guider/client';
export default createRedirect({ to: '/in-depth/sources-vs-embeds' });

View File

@@ -1,42 +0,0 @@
import {
Button,
Card,
CardGrid,
GuiderLayout,
Hero,
} from '@neato/guider/client';
export default function LandingPage() {
return (
<GuiderLayout meta={{ layout: 'page' }}>
<Hero>
<Hero.Badge title="V2.3.0" to="/get-started/changelog">
See changelog for more
</Hero.Badge>
<Hero.Title>@movie-web/providers</Hero.Title>
<Hero.Subtitle>
Easily scrape all sorts of media sites for content.
</Hero.Subtitle>
<Hero.Actions>
<Button to="/get-started/introduction">Get Started</Button>
<Button to="https://github.com/movie-web/providers" type="secondary">
Open on GitHub
</Button>
</Hero.Actions>
</Hero>
<CardGrid>
<Card icon="mdi:code-json" title="Scrape popular streaming websites.">
Don&apos;t settle for just one media site for you content, use
everything that&apos;s available.
</Card>
<Card icon="mdi:git" title="Multi-platform.">
Scrape from browser or server, whichever you prefer.
</Card>
<Card icon="mdi:language-typescript" title="Easy to use.">
Get started with scraping your favourite media sites with just 5 lines
of code. Fully typed of course.
</Card>
</CardGrid>
</GuiderLayout>
);
}

10120
.docs/pnpm-lock.yaml generated

File diff suppressed because it is too large.

Binary file not shown. (Before: 887 KiB)

Binary file not shown. (Before: 328 KiB)

Binary file not shown. (Before: 15 KiB | After: 15 KiB)

Binary file not shown. (Before: 28 KiB)

Binary file not shown. (Before: 27 KiB)

8
.docs/renovate.json Executable file
View File

@@ -0,0 +1,8 @@
{
"extends": [
"@nuxtjs"
],
"lockFileMaintenance": {
"enabled": true
}
}

View File

@@ -1,112 +0,0 @@
import { defineTheme, directory, group, link, social } from '@neato/guider/theme';
import { Logo } from './components/Logo';
import { NextSeo } from 'next-seo';
import coverUrl from "./public/cover.png";
import faviconUrl from "./public/favicon.ico";
export default defineTheme({
github: "movie-web/providers",
contentFooter: {
text: "Made with 💜",
editRepositoryBase: "https://github.com/movie-web/providers",
socials: [
social.github("https://github.com/movie-web/providers"),
social.discord("https://movie-web.github.io/links/discord"),
]
},
meta: (pageMeta) => (
<NextSeo {...{
title: `${pageMeta.title ?? "For all your media scraping needs"} | movie-web`,
description: pageMeta.description ?? "movie-web/providers : Easily scrape all sorts of media sites for content.",
openGraph: {
images: [{
url: coverUrl.src,
}],
title: `${pageMeta.title ?? "For all your media scraping needs"} | movie-web`,
description: pageMeta.description ?? "movie-web/providers : Easily scrape all sorts of media sites for content.",
},
twitter: {
cardType: 'summary_large_image',
},
additionalLinkTags: [
{
href: faviconUrl.src,
rel: "icon",
type: "image/x-icon",
}
]
}} />
),
settings: {
logo: () => <Logo />,
colors: {
primary: "#E67070",
primaryLighter: "#E59595",
primaryDarker: "#D21818",
background: "#000000",
backgroundLighter: "#141414",
backgroundLightest: "#292929",
backgroundDarker: "#000000",
line: "#404040",
text: "#B3B3B3",
textLighter: "#CCCCCC",
textHighlight: "#cccccc",
codeWarning: '#222D20',
codeError: '#2B1B1F',
codeGreen: '#0B2823',
codeHighlight: '#0E2429',
codeWordHighlight: '#365C68',
semanticTip: '#39B864',
semanticTipLighter: '#75F2B6',
semanticNote: '#817EF3',
semanticNoteLighter: '#B9B8FC',
semanticImportant: '#A958E8',
semanticImportantLighter: '#D3A2F9',
semanticWarning: '#fff708',
semanticWarningLighter: '#faf8b1',
semanticCaution: '#FC6359',
semanticCautionLighter: '#FFA59F',
},
backgroundPattern: 'flare',
},
directories: [
directory("main", {
sidebar: [
group("Get Started", [
link("Introduction", "/get-started/introduction"),
link("Quickstart", "/get-started/quick-start"),
link("Examples", "/get-started/examples"),
link("Changelog", "/get-started/changelog"),
]),
group("Essentials", [
link("Usage on X", "/essentials/usage-on-x"),
link("Targets", "/essentials/targets"),
link("Fetchers", "/essentials/fetchers"),
link("Customize Providers", "/essentials/customize-providers"),
link("Using Streams", "/essentials/using-streams"),
]),
group("In Depth", [
link("Sources vs Embeds", "/in-depth/sources-vs-embeds"),
link("New Providers", "/in-depth/new-providers"),
link("Flags", "/in-depth/flags"),
]),
group("Extra Topics", [
link("Development and Contributing", "/extra-topics/development"),
]),
group("Api Reference", [
link("makeProviders", "/api-reference/makeProviders"),
link("ProviderControls.runAll", "/api-reference/ProviderControlsRunAll"),
link("ProviderControls.runSourceScraper", "/api-reference/ProviderControlsrunSourceScraper"),
link("ProviderControls.runEmbedScraper", "/api-reference/ProviderControlsrunEmbedScraper"),
link("ProviderControls.listSources", "/api-reference/ProviderControlslistSources"),
link("ProviderControls.listEmbeds", "/api-reference/ProviderControlslistEmbeds"),
link("ProviderControls.getMetadata", "/api-reference/ProviderControlsgetMetadata"),
link("makeStandardFetcher", "/api-reference/makeStandardFetcher"),
link("makeSimpleProxyFetcher", "/api-reference/makeSimpleProxyFetcher"),
])
]
})
],
});

18
.docs/tokens.config.ts Normal file
View File

@@ -0,0 +1,18 @@
import { defineTheme } from 'pinceau'
export default defineTheme({
color: {
primary: {
50: "#F5E5FF",
100: "#E7CCFF",
200: "#D4A9FF",
300: "#BE85FF",
400: "#A861FF",
500: "#8E3DFF",
600: "#7F36D4",
700: "#662CA6",
800: "#552578",
900: "#441E49"
}
}
})

View File

@@ -1,28 +1,3 @@
{
"compilerOptions": {
"lib": [
"dom",
"dom.iterable",
"esnext"
],
"allowJs": true,
"skipLibCheck": true,
"strict": false,
"noEmit": true,
"incremental": true,
"esModuleInterop": true,
"module": "esnext",
"moduleResolution": "Bundler",
"resolveJsonModule": true,
"isolatedModules": true,
"jsx": "preserve"
},
"include": [
"next-env.d.ts",
"**/*.ts",
"**/*.tsx"
],
"exclude": [
"node_modules"
]
"extends": "./.nuxt/tsconfig.json"
}

View File

@@ -1,6 +1,6 @@
{
"name": "@movie-web/providers",
"version": "2.3.0",
"version": "2.2.8",
"description": "Package that contains all the providers of movie-web",
"type": "module",
"main": "./lib/index.js",

View File

@@ -14,40 +14,31 @@ import { vidsrcembedScraper } from '@/providers/embeds/vidsrc';
import { vTubeScraper } from '@/providers/embeds/vtube';
import { flixhqScraper } from '@/providers/sources/flixhq/index';
import { goMoviesScraper } from '@/providers/sources/gomovies/index';
import { insertunitScraper } from '@/providers/sources/insertunit';
import { kissAsianScraper } from '@/providers/sources/kissasian/index';
import { lookmovieScraper } from '@/providers/sources/lookmovie';
import { remotestreamScraper } from '@/providers/sources/remotestream';
import { showboxScraper } from '@/providers/sources/showbox/index';
import { tugaflixScraper } from '@/providers/sources/tugaflix';
import { vidsrcScraper } from '@/providers/sources/vidsrc/index';
import { zoechipScraper } from '@/providers/sources/zoechip';
import { bflixScraper } from './embeds/bflix';
import { closeLoadScraper } from './embeds/closeload';
import { fileMoonScraper } from './embeds/filemoon';
import { fileMoonMp4Scraper } from './embeds/filemoon/mp4';
import { ridooScraper } from './embeds/ridoo';
import { smashyStreamOScraper } from './embeds/smashystream/opstream';
import { smashyStreamDScraper } from './embeds/smashystream/dued';
import { smashyStreamFScraper } from './embeds/smashystream/video1';
import { streamtapeScraper } from './embeds/streamtape';
import { streamvidScraper } from './embeds/streamvid';
import { vidCloudScraper } from './embeds/vidcloud';
import { vidplayScraper } from './embeds/vidplay';
import { voeScraper } from './embeds/voe';
import { warezcdnembedHlsScraper } from './embeds/warezcdn/hls';
import { warezcdnembedMp4Scraper } from './embeds/warezcdn/mp4';
import { wootlyScraper } from './embeds/wootly';
import { goojaraScraper } from './sources/goojara';
import { hdRezkaScraper } from './sources/hdrezka';
import { nepuScraper } from './sources/nepu';
import { nitesScraper } from './sources/nites';
import { primewireScraper } from './sources/primewire';
import { ridooMoviesScraper } from './sources/ridomovies';
import { smashyStreamScraper } from './sources/smashystream';
import { soaperTvScraper } from './sources/soapertv';
import { vidSrcToScraper } from './sources/vidsrcto';
import { warezcdnScraper } from './sources/warezcdn';
export function gatherAllSources(): Array<Sourcerer> {
// all sources are gathered here
@@ -67,11 +58,6 @@ export function gatherAllSources(): Array<Sourcerer> {
goojaraScraper,
hdRezkaScraper,
primewireScraper,
warezcdnScraper,
insertunitScraper,
nitesScraper,
soaperTvScraper,
tugaflixScraper,
];
}
@@ -89,11 +75,10 @@ export function gatherAllEmbeds(): Array<Embed> {
vidsrcembedScraper,
streambucketScraper,
smashyStreamFScraper,
smashyStreamOScraper,
smashyStreamDScraper,
ridooScraper,
closeLoadScraper,
fileMoonScraper,
fileMoonMp4Scraper,
vidplayScraper,
wootlyScraper,
doodScraper,
@@ -103,8 +88,5 @@ export function gatherAllEmbeds(): Array<Embed> {
droploadScraper,
filelionsScraper,
vTubeScraper,
warezcdnembedHlsScraper,
warezcdnembedMp4Scraper,
bflixScraper,
];
}

View File

@@ -1,42 +0,0 @@
import { unpack } from 'unpacker';
import { makeEmbed } from '@/providers/base';
const evalCodeRegex = /eval\((.*)\)/g;
const mp4Regex = /https?:\/\/.*\.mp4/;
export const bflixScraper = makeEmbed({
id: 'bflix',
name: 'bFlix',
rank: 113,
scrape: async (ctx) => {
const mainPage = await ctx.proxiedFetcher<string>(ctx.url);
const evalCode = mainPage.match(evalCodeRegex);
if (!evalCode) throw new Error('Failed to find eval code');
const unpacked = unpack(evalCode[0]);
const file = unpacked.match(mp4Regex);
if (!file?.[0]) throw new Error('Failed to find file');
return {
stream: [
{
id: 'primary',
type: 'file',
flags: [],
captions: [],
qualities: {
unknown: {
type: 'mp4',
url: file[0],
},
},
headers: {
Referer: 'https://bflix.gs/',
},
},
],
};
},
});

View File

@@ -11,7 +11,7 @@ const fileRegex = /file:"(.*?)"/g;
export const fileMoonScraper = makeEmbed({
id: 'filemoon',
name: 'Filemoon',
rank: 300,
rank: 400,
scrape: async (ctx) => {
const embedRes = await ctx.proxiedFetcher<string>(ctx.url, {
headers: {

View File

@@ -1,37 +0,0 @@
import { NotFoundError } from '@/utils/errors';
import { makeEmbed } from '../../base';
import { fileMoonScraper } from './index';
export const fileMoonMp4Scraper = makeEmbed({
id: 'filemoon-mp4',
name: 'Filemoon MP4',
rank: 400,
scrape: async (ctx) => {
const result = await fileMoonScraper.scrape(ctx);
if (!result.stream) throw new NotFoundError('Failed to find result');
if (result.stream[0].type !== 'hls') throw new NotFoundError('Failed to find hls stream');
const url = result.stream[0].playlist.replace(/\/hls2\//, '/download/').replace(/\.m3u8/, '.mp4');
return {
stream: [
{
id: 'primary',
type: 'file',
qualities: {
unknown: {
type: 'mp4',
url,
},
},
flags: [],
captions: result.stream[0].captions,
},
],
};
},
});

View File

@@ -0,0 +1,71 @@
import { load } from 'cheerio';
import { flags } from '@/entrypoint/utils/targets';
import { makeEmbed } from '@/providers/base';
type DPlayerSourcesResponse = {
title: string;
id: string;
file: string;
}[];
export const smashyStreamDScraper = makeEmbed({
id: 'smashystream-d',
name: 'SmashyStream (D)',
rank: 71,
async scrape(ctx) {
const mainPageRes = await ctx.proxiedFetcher<string>(ctx.url, {
headers: {
Referer: ctx.url,
},
});
const mainPageRes$ = load(mainPageRes);
const iframeUrl = mainPageRes$('iframe').attr('src');
if (!iframeUrl) throw new Error(`[${this.name}] failed to find iframe url`);
const mainUrl = new URL(iframeUrl);
const iframeRes = await ctx.proxiedFetcher<string>(iframeUrl, {
headers: {
Referer: ctx.url,
},
});
const textFilePath = iframeRes.match(/"file":"([^"]+)"/)?.[1];
const csrfToken = iframeRes.match(/"key":"([^"]+)"/)?.[1];
if (!textFilePath || !csrfToken) throw new Error(`[${this.name}] failed to find text file url or token`);
const textFileUrl = `${mainUrl.origin}${textFilePath}`;
const textFileRes = await ctx.proxiedFetcher<DPlayerSourcesResponse>(textFileUrl, {
method: 'POST',
headers: {
'Content-Type': 'application/x-www-form-urlencoded',
'X-CSRF-TOKEN': csrfToken,
Referer: iframeUrl,
},
});
// Playlists in Hindi, English, Tamil and Telugu are available. We only get the english one.
const textFilePlaylist = textFileRes.find((x) => x.title === 'English')?.file;
if (!textFilePlaylist) throw new Error(`[${this.name}] failed to find an english playlist`);
const playlistRes = await ctx.proxiedFetcher<string>(
`${mainUrl.origin}/playlist/${textFilePlaylist.slice(1)}.txt`,
{
method: 'POST',
headers: {
'Content-Type': 'application/x-www-form-urlencoded',
'X-CSRF-TOKEN': csrfToken,
Referer: iframeUrl,
},
},
);
return {
stream: [
{
id: 'primary',
playlist: playlistRes,
type: 'hls',
flags: [flags.CORS_ALLOWED],
captions: [],
},
],
};
},
});

View File

@@ -1,17 +0,0 @@
import { makeEmbed } from '@/providers/base';
import { smashyStreamFScraper } from './video1';
export const smashyStreamOScraper = makeEmbed({
// the scraping logic for all smashystream embeds is the same
// all the embeds can be added in the same way
id: 'smashystream-o',
name: 'SmashyStream (O)',
rank: 70,
async scrape(ctx) {
const result = await smashyStreamFScraper.scrape(ctx);
return {
stream: result.stream,
};
},
});

View File

@@ -1,29 +1,16 @@
import { flags } from '@/entrypoint/utils/targets';
import { makeEmbed } from '@/providers/base';
import { Caption, getCaptionTypeFromUrl, labelToLanguageCode } from '@/providers/captions';
import { NotFoundError } from '@/utils/errors';
type FPlayerResponse = {
sourceUrls: string[];
subtitles: string | null;
subtitleUrls: string;
};
// if you don't understand how this is reversed
// check https://discord.com/channels/871713465100816424/1186646348137775164/1225644477188935770
// feel free to reach out atpn or ciaran_ds on discord if you've any problems
function decode(str: string): string {
const b = ['U0ZML2RVN0IvRGx4', 'MGNhL0JWb0kvTlM5', 'Ym94LzJTSS9aU0Zj', 'SGJ0L1dGakIvN0dX', 'eE52L1QwOC96N0Yz'];
let formatedB64 = str.slice(2);
for (let i = 4; i > -1; i--) {
formatedB64 = formatedB64.replace(`//${b[i]}`, '');
}
return atob(formatedB64);
}
export const smashyStreamFScraper = makeEmbed({
id: 'smashystream-f',
name: 'SmashyStream (F)',
rank: 71,
rank: 70,
async scrape(ctx) {
const res = await ctx.proxiedFetcher<FPlayerResponse>(ctx.url, {
headers: {
@@ -31,20 +18,15 @@ export const smashyStreamFScraper = makeEmbed({
},
});
if (!res.sourceUrls[0]) throw new NotFoundError('No watchable item found');
const playlist = decode(res.sourceUrls[0]);
if (!playlist.includes('.m3u8')) throw new Error('Failed to decode');
const captions: Caption[] =
res.subtitles
?.match(/\[([^\]]+)\](https?:\/\/\S+?)(?=,\[|$)/g)
res.subtitleUrls
.match(/\[([^\]]+)\](https?:\/\/\S+?)(?=,\[|$)/g)
?.map<Caption | null>((entry: string) => {
const match = entry.match(/\[([^\]]+)\](https?:\/\/\S+?)(?=,\[|$)/);
if (match) {
const [, language, url] = match;
if (language && url) {
const languageCode = labelToLanguageCode(language.replace(/ - .*/, ''));
const languageCode = labelToLanguageCode(language);
const captionType = getCaptionTypeFromUrl(url);
if (!languageCode || !captionType) return null;
return {
@@ -64,7 +46,7 @@ export const smashyStreamFScraper = makeEmbed({
stream: [
{
id: 'primary',
playlist,
playlist: res.sourceUrls[0],
type: 'hls',
flags: [flags.CORS_ALLOWED],
captions,

View File

@@ -1,5 +1,5 @@
import { flags } from '@/entrypoint/utils/targets';
import { makeEmbed } from '@/providers/base';
import { vidsrcRCPBase } from '@/providers/sources/vidsrc/common';
const hlsURLRegex = /file:"(.*?)"/;
const setPassRegex = /var pass_path = "(.*set_pass\.php.*)";/;
@@ -32,35 +32,28 @@ export const vidsrcembedScraper = makeEmbed({
if (!finalUrl.includes('.m3u8')) throw new Error('Unable to find HLS playlist');
let setPassLink = html.match(setPassRegex)?.[1];
if (!setPassLink) throw new Error('Unable to find set_pass.php link');
// isn't needed, the stream works without it anyway
// shouldn't fail if the setpass link is not found
if (setPassLink) {
if (setPassLink.startsWith('//')) {
setPassLink = `https:${setPassLink}`;
}
// VidSrc uses a password endpoint to temporarily whitelist the user's IP. This is called in an interval by the player.
// It currently has no effect on the player itself, the content plays fine without it.
// In the future we might have to introduce hooks for the frontend to call this endpoint.
await ctx.proxiedFetcher(setPassLink, {
headers: {
referer: ctx.url,
},
});
if (setPassLink.startsWith('//')) {
setPassLink = `https:${setPassLink}`;
}
// VidSrc uses a password endpoint to temporarily whitelist the user's IP. This is called in an interval by the player.
// It currently has no effect on the player itself, the content plays fine without it.
// In the future we might have to introduce hooks for the frontend to call this endpoint.
await ctx.proxiedFetcher(setPassLink, {
headers: {
referer: ctx.url,
},
});
return {
stream: [
{
id: 'primary',
type: 'hls',
playlist: finalUrl,
headers: {
Referer: vidsrcRCPBase,
Origin: vidsrcRCPBase,
},
flags: [],
flags: [flags.CORS_ALLOWED],
captions: [],
},
],

View File

@@ -1,58 +0,0 @@
import { warezcdnPlayerBase } from '@/providers/sources/warezcdn/common';
import { EmbedScrapeContext } from '@/utils/context';
import { NotFoundError } from '@/utils/errors';
function decrypt(input: string) {
let output = atob(input);
// Remove leading and trailing whitespaces
output = output.trim();
// Reverse the string
output = output.split('').reverse().join('');
// Get the last 5 characters and reverse them
let last = output.slice(-5);
last = last.split('').reverse().join('');
// Remove the last 5 characters from the original string
output = output.slice(0, -5);
// Return the original string concatenated with the reversed last 5 characters
return `${output}${last}`;
}
export async function getDecryptedId(ctx: EmbedScrapeContext) {
const page = await ctx.proxiedFetcher<string>(`/player.php`, {
baseUrl: warezcdnPlayerBase,
headers: {
Referer: `${warezcdnPlayerBase}/getEmbed.php?${new URLSearchParams({
id: ctx.url,
sv: 'warezcdn',
})}`,
},
query: {
id: ctx.url,
},
});
const allowanceKey = page.match(/let allowanceKey = "(.*?)";/)?.[1];
if (!allowanceKey) throw new NotFoundError('Failed to get allowanceKey');
const streamData = await ctx.proxiedFetcher<string>('/functions.php', {
baseUrl: warezcdnPlayerBase,
method: 'POST',
body: new URLSearchParams({
getVideo: ctx.url,
key: allowanceKey,
}),
});
const stream = JSON.parse(streamData);
if (!stream.id) throw new NotFoundError("can't get stream id");
const decryptedId = decrypt(stream.id);
if (!decryptedId) throw new NotFoundError("can't get file id");
return decryptedId;
}

View File

@@ -1,44 +0,0 @@
import { flags } from '@/entrypoint/utils/targets';
import { makeEmbed } from '@/providers/base';
import { EmbedScrapeContext } from '@/utils/context';
import { NotFoundError } from '@/utils/errors';
import { getDecryptedId } from './common';
// Method found by atpn
async function getVideowlUrlStream(ctx: EmbedScrapeContext, decryptedId: string) {
const sharePage = await ctx.proxiedFetcher<string>('https://cloud.mail.ru/public/uaRH/2PYWcJRpH');
const regex = /"videowl_view":\{"count":"(\d+)","url":"([^"]+)"\}/g;
const videowlUrl = regex.exec(sharePage)?.[2];
if (!videowlUrl) throw new NotFoundError('Failed to get videoOwlUrl');
return `${videowlUrl}/0p/${btoa(decryptedId)}.m3u8?${new URLSearchParams({
double_encode: '1',
})}`;
}
export const warezcdnembedHlsScraper = makeEmbed({
id: 'warezcdnembedhls', // WarezCDN is both a source and an embed host
name: 'WarezCDN HLS',
rank: 83,
async scrape(ctx) {
const decryptedId = await getDecryptedId(ctx);
if (!decryptedId) throw new NotFoundError("can't get file id");
const streamUrl = await getVideowlUrlStream(ctx, decryptedId);
return {
stream: [
{
id: 'primary',
type: 'hls',
flags: [flags.IP_LOCKED],
captions: [],
playlist: streamUrl,
},
],
};
},
});

View File

@@ -1,58 +0,0 @@
import { flags } from '@/entrypoint/utils/targets';
import { makeEmbed } from '@/providers/base';
import { warezcdnWorkerProxy } from '@/providers/sources/warezcdn/common';
import { EmbedScrapeContext } from '@/utils/context';
import { NotFoundError } from '@/utils/errors';
import { getDecryptedId } from './common';
const cdnListing = [50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64];
async function checkUrls(ctx: EmbedScrapeContext, fileId: string) {
for (const id of cdnListing) {
const url = `https://cloclo${id}.cloud.mail.ru/weblink/view/${fileId}`;
const response = await ctx.proxiedFetcher.full(url, {
method: 'GET',
headers: {
Range: 'bytes=0-1',
},
});
if (response.statusCode === 206) return url;
}
return null;
}
export const warezcdnembedMp4Scraper = makeEmbed({
id: 'warezcdnembedmp4', // WarezCDN is both a source and an embed host
name: 'WarezCDN MP4',
rank: 82,
disabled: false,
async scrape(ctx) {
const decryptedId = await getDecryptedId(ctx);
if (!decryptedId) throw new NotFoundError("can't get file id");
const streamUrl = await checkUrls(ctx, decryptedId);
if (!streamUrl) throw new NotFoundError("can't get stream id");
return {
stream: [
{
id: 'primary',
captions: [],
qualities: {
unknown: {
type: 'mp4',
url: `${warezcdnWorkerProxy}/?${new URLSearchParams({
url: streamUrl,
})}`,
},
},
type: 'file',
flags: [flags.CORS_ALLOWED],
},
],
};
},
});

View File

@@ -1,30 +0,0 @@
import { Caption, removeDuplicatedLanguages } from '@/providers/captions';
import { Subtitle } from './types';
export async function getCaptions(data: Subtitle[]) {
let captions: Caption[] = [];
for (const subtitle of data) {
let language = '';
if (subtitle.name.includes('Рус')) {
language = 'ru';
} else if (subtitle.name.includes('Укр')) {
language = 'uk';
} else if (subtitle.name.includes('Eng')) {
language = 'en';
} else {
continue;
}
captions.push({
id: subtitle.url,
url: subtitle.url,
language,
type: 'vtt',
hasCorsRestrictions: false,
});
}
captions = removeDuplicatedLanguages(captions);
return captions;
}

View File

@@ -1,103 +0,0 @@
import { flags } from '@/entrypoint/utils/targets';
import { makeSourcerer } from '@/providers/base';
import { Caption } from '@/providers/captions';
import { NotFoundError } from '@/utils/errors';
import { getCaptions } from './captions';
import { Season } from './types';
const insertUnitBase = 'https://api.insertunit.ws/';
export const insertunitScraper = makeSourcerer({
id: 'insertunit',
name: 'Insertunit',
disabled: false,
rank: 60,
flags: [flags.CORS_ALLOWED],
async scrapeShow(ctx) {
const playerData = await ctx.fetcher<string>(`/embed/imdb/${ctx.media.imdbId}`, {
baseUrl: insertUnitBase,
});
ctx.progress(30);
const seasonDataJSONregex = /seasons:(.*)/;
const seasonData = seasonDataJSONregex.exec(playerData);
if (seasonData === null || seasonData[1] === null) {
throw new NotFoundError('No result found');
}
ctx.progress(60);
const seasonTable: Season[] = JSON.parse(seasonData[1]) as Season[];
const currentSeason = seasonTable.find(
(seasonElement) => seasonElement.season === ctx.media.season.number && !seasonElement.blocked,
);
const currentEpisode = currentSeason?.episodes.find((episodeElement) =>
episodeElement.episode.includes(ctx.media.episode.number.toString()),
);
if (!currentEpisode?.hls) throw new NotFoundError('No result found');
let captions: Caption[] = [];
if (currentEpisode.cc != null) {
captions = await getCaptions(currentEpisode.cc);
}
ctx.progress(95);
return {
embeds: [],
stream: [
{
id: 'primary',
playlist: currentEpisode.hls,
type: 'hls',
flags: [flags.CORS_ALLOWED],
captions,
},
],
};
},
async scrapeMovie(ctx) {
const playerData = await ctx.fetcher<string>(`/embed/imdb/${ctx.media.imdbId}`, {
baseUrl: insertUnitBase,
});
ctx.progress(35);
const streamRegex = /hls: "([^"]*)/;
const streamData = streamRegex.exec(playerData);
if (streamData === null || streamData[1] === null) {
throw new NotFoundError('No result found');
}
ctx.progress(75);
const subtitleRegex = /cc: (.*)/;
const subtitleJSONData = subtitleRegex.exec(playerData);
let captions: Caption[] = [];
if (subtitleJSONData != null && subtitleJSONData[1] != null) {
const subtitleData = JSON.parse(subtitleJSONData[1]);
captions = await getCaptions(subtitleData);
}
ctx.progress(90);
return {
embeds: [],
stream: [
{
id: 'primary',
type: 'hls',
playlist: streamData[1],
flags: [flags.CORS_ALLOWED],
captions,
},
],
};
},
});

View File

@@ -1,30 +0,0 @@
export interface Subtitle {
url: string;
name: string;
}
export interface Episode {
episode: string;
id: number;
videoKey: string;
hls: string;
audio: {
names: string[];
order: number[];
};
cc: Subtitle[];
duration: number;
title: string;
download: string;
sections: string[];
poster: string;
preview: {
src: string;
};
}
export interface Season {
season: number;
blocked: boolean;
episodes: Episode[];
}

View File

@@ -1,79 +0,0 @@
import { load } from 'cheerio';
import { SourcererOutput, makeSourcerer } from '@/providers/base';
import { compareMedia } from '@/utils/compare';
import { MovieScrapeContext, ShowScrapeContext } from '@/utils/context';
import { NotFoundError } from '@/utils/errors';
const baseUrl = 'https://w1.nites.is';
async function comboScraper(ctx: ShowScrapeContext | MovieScrapeContext): Promise<SourcererOutput> {
const searchPage = await ctx.proxiedFetcher('/wp-admin/admin-ajax.php', {
baseUrl,
method: 'POST',
body: new URLSearchParams({
action: 'ajax_pagination',
query_vars: 'mixed',
search: ctx.media.title,
}),
});
const $search = load(searchPage);
const searchResults: { title: string; year: number; url: string }[] = [];
$search('li').each((_, element) => {
const title = $search(element).find('.entry-title').first().text().trim();
const year = parseInt($search(element).find('.year').first().text().trim(), 10);
const url = $search(element).find('.lnk-blk').attr('href');
if (!title || !year || !url) return;
searchResults.push({ title, year, url });
});
let watchPageUrl = searchResults.find((x) => x && compareMedia(ctx.media, x.title, x.year))?.url;
if (!watchPageUrl) throw new NotFoundError('No watchable item found');
if (ctx.media.type === 'show') {
const match = watchPageUrl.match(/\/series\/([^/]+)\/?/);
if (!match) throw new Error('Failed to parse watch page url');
watchPageUrl = watchPageUrl.replace(
`/series/${match[1]}`,
`/episode/${match[1]}-${ctx.media.season.number}x${ctx.media.episode.number}`,
);
}
const watchPage = load(await ctx.proxiedFetcher(watchPageUrl));
// it embeds vidsrc when bflix does not have the stream
// i think all shows embed vidsrc, not sure
const embedUrl = watchPage('ul.bx-lst li a:contains("- Bflix")')
.closest('aside')
.next('div.video-options')
.find('iframe')
.attr('data-lazy-src');
if (!embedUrl) throw new Error('Failed to find embed url');
const embedPage = load(await ctx.proxiedFetcher(embedUrl));
const url = embedPage('iframe').attr('src');
if (!url) throw new Error('Failed to find embed url');
return {
embeds: [
{
embedId: 'bflix',
url,
},
],
};
}
export const nitesScraper = makeSourcerer({
id: 'nites',
name: 'Nites',
rank: 90,
flags: [],
scrapeMovie: comboScraper,
scrapeShow: comboScraper,
});

View File

@@ -19,19 +19,13 @@ const universalScraper = async (ctx: MovieScrapeContext | ShowScrapeContext) =>
q: ctx.media.title,
},
});
const mediaData = searchResult.data.items.map((movieEl) => {
const name = movieEl.title;
const year = movieEl.contentable.releaseYear;
const fullSlug = movieEl.fullSlug;
return { name, year, fullSlug };
});
const targetMedia = mediaData.find((m) => m.name === ctx.media.title && m.year === ctx.media.releaseYear.toString());
if (!targetMedia?.fullSlug) throw new NotFoundError('No watchable item found');
const show = searchResult.data.items[0];
if (!show) throw new NotFoundError('No watchable item found');
let iframeSourceUrl = `/${targetMedia.fullSlug}/videos`;
let iframeSourceUrl = `/${show.fullSlug}/videos`;
if (ctx.media.type === 'show') {
const showPageResult = await ctx.proxiedFetcher<string>(`/${targetMedia.fullSlug}`, {
const showPageResult = await ctx.proxiedFetcher<string>(`/${show.fullSlug}`, {
baseUrl: ridoMoviesBase,
});
const fullEpisodeSlug = `season-${ctx.media.season.number}/episode-${ctx.media.episode.number}`;

View File

@@ -42,7 +42,6 @@ export const showboxScraper = makeSourcerer({
id: 'showbox',
name: 'Showbox',
rank: 150,
disabled: true,
flags: [flags.CORS_ALLOWED, flags.CF_BLOCKED],
scrapeShow: comboScraper,
scrapeMovie: comboScraper,

View File

@@ -1,29 +1,56 @@
import { load } from 'cheerio';
import { flags } from '@/entrypoint/utils/targets';
import { SourcererOutput, makeSourcerer } from '@/providers/base';
import { smashyStreamOScraper } from '@/providers/embeds/smashystream/opstream';
import { SourcererEmbed, SourcererOutput, makeSourcerer } from '@/providers/base';
import { smashyStreamDScraper } from '@/providers/embeds/smashystream/dued';
import { smashyStreamFScraper } from '@/providers/embeds/smashystream/video1';
import { MovieScrapeContext, ShowScrapeContext } from '@/utils/context';
const smashyStreamBase = 'https://embed.smashystream.com';
const referer = 'https://smashystream.com/';
const universalScraper = async (ctx: ShowScrapeContext | MovieScrapeContext): Promise<SourcererOutput> => {
// theres no point in fetching the player page
// because it too just calls the api with the tmdb id
// thats the only way to find out if the embed has any streams
const query =
ctx.media.type === 'movie'
? `?tmdb=${ctx.media.tmdbId}`
: `?tmdbId=${ctx.media.tmdbId}&season=${ctx.media.season.number}&episode=${ctx.media.episode.number}`;
const mainPage = await ctx.proxiedFetcher<string>('/playere.php', {
query: {
tmdb: ctx.media.tmdbId,
...(ctx.media.type === 'show' && {
season: ctx.media.season.number.toString(),
episode: ctx.media.episode.number.toString(),
}),
},
headers: {
Referer: referer,
},
baseUrl: smashyStreamBase,
});
ctx.progress(30);
const mainPage$ = load(mainPage);
const sourceUrls = mainPage$('.dropdown-menu a[data-url]')
.map((_, el) => mainPage$(el).attr('data-url'))
.get();
const embeds: SourcererEmbed[] = [];
for (const sourceUrl of sourceUrls) {
if (sourceUrl.includes('video1d.php')) {
embeds.push({
embedId: smashyStreamFScraper.id,
url: sourceUrl,
});
}
if (sourceUrl.includes('dued.php')) {
embeds.push({
embedId: smashyStreamDScraper.id,
url: sourceUrl,
});
}
}
ctx.progress(60);
return {
embeds: [
{
embedId: smashyStreamFScraper.id,
url: `https://embed.smashystream.com/video1dn.php${query}`,
},
{
embedId: smashyStreamOScraper.id,
url: `https://embed.smashystream.com/videoop.php${query}`,
},
],
embeds,
};
};
@@ -32,6 +59,7 @@ export const smashyStreamScraper = makeSourcerer({
name: 'SmashyStream',
rank: 30,
flags: [flags.CORS_ALLOWED],
disabled: true,
scrapeMovie: universalScraper,
scrapeShow: universalScraper,
});

View File

@@ -1,120 +0,0 @@
import { load } from 'cheerio';
import { flags } from '@/entrypoint/utils/targets';
import { Caption, labelToLanguageCode } from '@/providers/captions';
import { MovieScrapeContext, ShowScrapeContext } from '@/utils/context';
import { NotFoundError } from '@/utils/errors';
import { InfoResponse } from './types';
import { SourcererOutput, makeSourcerer } from '../../base';
const baseUrl = 'https://soaper.tv';
const universalScraper = async (ctx: MovieScrapeContext | ShowScrapeContext): Promise<SourcererOutput> => {
const searchResult = await ctx.proxiedFetcher('/search.html', {
baseUrl,
query: {
keyword: ctx.media.title,
},
});
const searchResult$ = load(searchResult);
let showLink = searchResult$('a')
.filter((_, el) => searchResult$(el).text() === ctx.media.title)
.attr('href');
if (!showLink) throw new NotFoundError('Content not found');
if (ctx.media.type === 'show') {
const seasonNumber = ctx.media.season.number;
const episodeNumber = ctx.media.episode.number;
const showPage = await ctx.proxiedFetcher(showLink, { baseUrl });
const showPage$ = load(showPage);
const seasonBlock = showPage$('h4')
.filter((_, el) => showPage$(el).text().trim().split(':')[0].trim() === `Season${seasonNumber}`)
.parent();
const episodes = seasonBlock.find('a').toArray();
showLink = showPage$(
episodes.find((el) => parseInt(showPage$(el).text().split('.')[0], 10) === episodeNumber),
).attr('href');
}
if (!showLink) throw new NotFoundError('Content not found');
const contentPage = await ctx.proxiedFetcher(showLink, { baseUrl });
const contentPage$ = load(contentPage);
const pass = contentPage$('#hId').attr('value');
const param = contentPage$('#divU').text();
if (!pass || !param) throw new NotFoundError('Content not found');
const formData = new URLSearchParams();
formData.append('pass', pass);
formData.append('param', param);
formData.append('e2', '0');
formData.append('server', '0');
const infoEndpoint = ctx.media.type === 'show' ? '/home/index/getEInfoAjax' : '/home/index/getMInfoAjax';
const streamRes = await ctx.proxiedFetcher<string>(infoEndpoint, {
baseUrl,
method: 'POST',
body: formData,
headers: {
referer: `${baseUrl}${showLink}`,
},
});
const streamResJson: InfoResponse = JSON.parse(streamRes);
const captions: Caption[] = [];
for (const sub of streamResJson.subs) {
// Some subtitles are named <Language>.srt, some are named <LanguageCode>:hi, or just <LanguageCode>
let language: string | null = '';
if (sub.name.includes('.srt')) {
language = labelToLanguageCode(sub.name.split('.srt')[0]);
} else if (sub.name.includes(':')) {
language = sub.name.split(':')[0];
} else {
language = sub.name;
}
if (!language) continue;
captions.push({
id: sub.path,
url: sub.path,
type: 'srt',
hasCorsRestrictions: false,
language,
});
}
return {
embeds: [],
stream: [
{
id: 'primary',
playlist: streamResJson.val,
type: 'hls',
flags: [flags.IP_LOCKED],
captions,
},
...(streamResJson.val_bak
? [
{
id: 'backup',
playlist: streamResJson.val_bak,
type: 'hls' as const,
flags: [flags.IP_LOCKED],
captions,
},
]
: []),
],
};
};
export const soaperTvScraper = makeSourcerer({
id: 'soapertv',
name: 'SoaperTV',
rank: 115,
flags: [flags.IP_LOCKED],
scrapeMovie: universalScraper,
scrapeShow: universalScraper,
});
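The caption loop above accepts three subtitle name formats ("English.srt", "en:hi", or a bare "en"). A minimal sketch of that branching, assuming labelToLanguageCode maps labels such as "English" to "en" (the helper name and sample values are illustrative, not part of the source):
// Illustrative helper mirroring the subtitle-name branching above.
function subtitleNameToLanguage(name: string): string | null {
  // "English.srt" -> labelToLanguageCode("English") -> "en" (assumed mapping)
  if (name.includes('.srt')) return labelToLanguageCode(name.split('.srt')[0]);
  // "en:hi" -> "en"
  if (name.includes(':')) return name.split(':')[0];
  // already a bare language code, e.g. "en"
  return name;
}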

View File

@@ -1,15 +0,0 @@
export interface Subtitle {
path: string;
name: string;
}
export interface InfoResponse {
key: boolean;
val: string;
vtt: string;
val_bak: string;
pos: number;
type: string;
subs: Subtitle[];
ip: string;
}

View File

@@ -1,21 +0,0 @@
import { load } from 'cheerio';
export const baseUrl = 'https://tugaflix.best/';
export function parseSearch(page: string): { title: string; year?: number; url: string }[] {
const results: { title: string; year?: number; url: string }[] = [];
const $ = load(page);
$('.items .poster').each((_, element) => {
const $link = $(element).find('a');
const url = $link.attr('href');
// ex title: Home Alone (1990)
const [, title, year] = $link.attr('title')?.match(/^(.*?)\s*(?:\((\d{4})\))?\s*$/) || [];
if (!title || !url) return;
// titles don't always have the year
results.push({ title, year: year ? parseInt(year, 10) : undefined, url });
});
return results;
}
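The title attribute regex above splits an entry such as "Home Alone (1990)" into a title and an optional year; a quick illustration (the inputs are hypothetical):
// Hypothetical inputs for the title/year regex used in parseSearch.
const withYear = 'Home Alone (1990)'.match(/^(.*?)\s*(?:\((\d{4})\))?\s*$/);
// withYear?.[1] === 'Home Alone', withYear?.[2] === '1990'
const withoutYear = 'Home Alone'.match(/^(.*?)\s*(?:\((\d{4})\))?\s*$/);
// withoutYear?.[1] === 'Home Alone', withoutYear?.[2] === undefined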

View File

@@ -1,116 +0,0 @@
import { load } from 'cheerio';
import { flags } from '@/entrypoint/utils/targets';
import { SourcererEmbed, makeSourcerer } from '@/providers/base';
import { compareMedia } from '@/utils/compare';
import { NotFoundError } from '@/utils/errors';
import { baseUrl, parseSearch } from './common';
export const tugaflixScraper = makeSourcerer({
id: 'tugaflix',
name: 'Tugaflix',
rank: 73,
flags: [flags.IP_LOCKED],
scrapeMovie: async (ctx) => {
const searchResults = parseSearch(
await ctx.proxiedFetcher<string>('/filmes/', {
baseUrl,
query: {
s: ctx.media.title,
},
}),
);
if (searchResults.length === 0) throw new NotFoundError('No watchable item found');
const url = searchResults.find((x) => x && compareMedia(ctx.media, x.title, x.year))?.url;
if (!url) throw new NotFoundError('No watchable item found');
const videoPage = await ctx.proxiedFetcher<string>(url, {
method: 'POST',
body: new URLSearchParams({ play: '' }),
});
const $ = load(videoPage);
const embeds: SourcererEmbed[] = [];
for (const element of $('.play a')) {
const embedUrl = $(element).attr('href');
if (!embedUrl) continue;
const embedPage = await ctx.proxiedFetcher.full(
embedUrl.startsWith('https://') ? embedUrl : `https://${embedUrl}`,
);
const finalUrl = load(embedPage.body)('a:contains("Download Filme")').attr('href');
if (!finalUrl) continue;
if (finalUrl.includes('streamtape')) {
embeds.push({
embedId: 'streamtape',
url: finalUrl,
});
// found doodstream on a few shows, maybe movies use it too?
// player 2 is just streamtape in a custom player
} else if (finalUrl.includes('dood')) {
embeds.push({
embedId: 'dood',
url: finalUrl,
});
}
}
return {
embeds,
};
},
scrapeShow: async (ctx) => {
const searchResults = parseSearch(
await ctx.proxiedFetcher<string>('/series/', {
baseUrl,
query: {
s: ctx.media.title,
},
}),
);
if (searchResults.length === 0) throw new NotFoundError('No watchable item found');
const url = searchResults.find((x) => x && compareMedia(ctx.media, x.title, x.year))?.url;
if (!url) throw new NotFoundError('No watchable item found');
const s = ctx.media.season.number < 10 ? `0${ctx.media.season.number}` : ctx.media.season.number.toString();
const e = ctx.media.episode.number < 10 ? `0${ctx.media.episode.number}` : ctx.media.episode.number.toString();
const videoPage = await ctx.proxiedFetcher(url, {
method: 'POST',
body: new URLSearchParams({ [`S${s}E${e}`]: '' }),
});
const embedUrl = load(videoPage)('iframe[name="player"]').attr('src');
if (!embedUrl) throw new Error('Failed to find iframe');
const playerPage = await ctx.proxiedFetcher(embedUrl.startsWith('https:') ? embedUrl : `https:${embedUrl}`, {
method: 'POST',
body: new URLSearchParams({ submit: '' }),
});
const embeds: SourcererEmbed[] = [];
const finalUrl = load(playerPage)('a:contains("Download Episodio")').attr('href');
if (finalUrl?.includes('streamtape')) {
embeds.push({
embedId: 'streamtape',
url: finalUrl,
});
} else if (finalUrl?.includes('dood')) {
embeds.push({
embedId: 'dood',
url: finalUrl,
});
}
return {
embeds,
};
},
});
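The show scraper above posts a form whose only key encodes the zero-padded season and episode, e.g. "S01E05". An equivalent sketch of that key construction (the helper name is made up):
// Hypothetical helper equivalent to the padding ternaries above.
const episodeKey = (season: number, episode: number) =>
  `S${String(season).padStart(2, '0')}E${String(episode).padStart(2, '0')}`;
// episodeKey(1, 5) === 'S01E05'; the request body then becomes "S01E05="
new URLSearchParams({ [episodeKey(1, 5)]: '' }).toString(); // "S01E05="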

View File

@@ -1,2 +1,2 @@
export const vidsrcBase = 'https://vidsrc.me';
export const vidsrcRCPBase = 'https://vidsrc.stream';
export const vidsrcRCPBase = 'https://rcp.vidsrc.me';

View File

@@ -1,3 +1,4 @@
import { flags } from '@/entrypoint/utils/targets';
import { makeSourcerer } from '@/providers/base';
import { scrapeMovie } from '@/providers/sources/vidsrc/scrape-movie';
import { scrapeShow } from '@/providers/sources/vidsrc/scrape-show';
@@ -6,8 +7,7 @@ export const vidsrcScraper = makeSourcerer({
id: 'vidsrc',
name: 'VidSrc',
rank: 90,
disabled: true,
flags: [],
flags: [flags.CORS_ALLOWED],
scrapeMovie,
scrapeShow,
});

View File

@@ -60,16 +60,10 @@ const universalScraper = async (ctx: ShowScrapeContext | MovieScrapeContext): Pr
const urlWithSubtitles = embedArr.find((v) => v.source === 'Vidplay' && v.url.includes('sub.info'))?.url;
const subtitleUrl = urlWithSubtitles ? new URL(urlWithSubtitles).searchParams.get('sub.info') : null;
if (subtitleUrl) fullUrl.searchParams.set('sub.info', subtitleUrl);
embeds.push(
{
embedId: 'filemoon',
url: fullUrl.toString(),
},
{
embedId: 'filemoon-mp4',
url: fullUrl.toString(),
},
);
embeds.push({
embedId: 'filemoon',
url: fullUrl.toString(),
});
}
}

View File

@@ -1,24 +0,0 @@
import { ScrapeContext } from '@/utils/context';
export const warezcdnBase = 'https://embed.warezcdn.com';
export const warezcdnApiBase = 'https://warezcdn.com/embed';
export const warezcdnPlayerBase = 'https://warezcdn.com/player';
export const warezcdnWorkerProxy = 'https://workerproxy.warezcdn.workers.dev';
export async function getExternalPlayerUrl(ctx: ScrapeContext, embedId: string, embedUrl: string) {
const params = {
id: embedUrl,
sv: embedId,
};
const realUrl = await ctx.proxiedFetcher<string>(`/getPlay.php`, {
baseUrl: warezcdnApiBase,
headers: {
Referer: `${warezcdnApiBase}/getEmbed.php?${new URLSearchParams(params)}`,
},
query: params,
});
const realEmbedUrl = realUrl.match(/window\.location\.href="([^"]*)";/);
if (!realEmbedUrl) throw new Error('Could not find embed url');
return realEmbedUrl[1];
}
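The regex above expects the getPlay.php response to contain an inline window.location.href assignment and pulls the target URL out of it; a hypothetical example (the response body and URL are made up):
// Hypothetical getPlay.php response and extraction; not real data.
const body = 'window.location.href="https://mixdrop.example/e/abc123";';
const realEmbedUrl = body.match(/window\.location\.href="([^"]*)";/);
// realEmbedUrl?.[1] === 'https://mixdrop.example/e/abc123'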

View File

@@ -1,114 +0,0 @@
import { load } from 'cheerio';
import { flags } from '@/entrypoint/utils/targets';
import { SourcererEmbed, makeSourcerer } from '@/providers/base';
import { mixdropScraper } from '@/providers/embeds/mixdrop';
import { warezcdnembedHlsScraper } from '@/providers/embeds/warezcdn/hls';
import { warezcdnembedMp4Scraper } from '@/providers/embeds/warezcdn/mp4';
import { NotFoundError } from '@/utils/errors';
import { getExternalPlayerUrl, warezcdnBase } from './common';
import { SerieAjaxResponse } from './types';
export const warezcdnScraper = makeSourcerer({
id: 'warezcdn',
name: 'WarezCDN',
rank: 81,
flags: [flags.CORS_ALLOWED],
scrapeMovie: async (ctx) => {
if (!ctx.media.imdbId) throw new NotFoundError('This source requires IMDB id.');
const serversPage = await ctx.proxiedFetcher<string>(`/filme/${ctx.media.imdbId}`, {
baseUrl: warezcdnBase,
});
const $ = load(serversPage);
const embedsHost = $('.hostList.active [data-load-embed]').get();
const embeds: SourcererEmbed[] = [];
embedsHost.forEach(async (element) => {
const embedHost = $(element).attr('data-load-embed-host')!;
const embedUrl = $(element).attr('data-load-embed')!;
if (embedHost === 'mixdrop') {
const realEmbedUrl = await getExternalPlayerUrl(ctx, 'mixdrop', embedUrl);
if (!realEmbedUrl) throw new Error('Could not find embed url');
embeds.push({
embedId: mixdropScraper.id,
url: realEmbedUrl,
});
} else if (embedHost === 'warezcdn') {
embeds.push(
{
embedId: warezcdnembedHlsScraper.id,
url: embedUrl,
},
{
embedId: warezcdnembedMp4Scraper.id,
url: embedUrl,
},
);
}
});
return {
embeds,
};
},
scrapeShow: async (ctx) => {
if (!ctx.media.imdbId) throw new NotFoundError('This source requires IMDB id.');
const url = `${warezcdnBase}/serie/${ctx.media.imdbId}/${ctx.media.season.number}/${ctx.media.episode.number}`;
const serversPage = await ctx.proxiedFetcher<string>(url);
const episodeId = serversPage.match(/\$\('\[data-load-episode-content="(\d+)"\]'\)/)?.[1];
if (!episodeId) throw new NotFoundError('Failed to find episode id');
const streamsData = await ctx.proxiedFetcher<string>(`/serieAjax.php`, {
method: 'POST',
baseUrl: warezcdnBase,
body: new URLSearchParams({
getAudios: episodeId,
}),
headers: {
Origin: warezcdnBase,
Referer: url,
'X-Requested-With': 'XMLHttpRequest',
},
});
const streams: SerieAjaxResponse = JSON.parse(streamsData);
const list = streams.list['0'];
const embeds: SourcererEmbed[] = [];
// 3 means ok
if (list.mixdropStatus === '3') {
const realEmbedUrl = await getExternalPlayerUrl(ctx, 'mixdrop', list.id);
if (!realEmbedUrl) throw new Error('Could not find embed url');
embeds.push({
embedId: mixdropScraper.id,
url: realEmbedUrl,
});
}
if (list.warezcdnStatus === '3') {
embeds.push(
{
embedId: warezcdnembedHlsScraper.id,
url: list.id,
},
{
embedId: warezcdnembedMp4Scraper.id,
url: list.id,
},
);
}
return {
embeds,
};
},
});
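The episode id is scraped from an inline script on the servers page; a hypothetical snippet showing what the regex above captures (the id value is made up):
// Hypothetical servers-page snippet; the id is illustrative only.
const page = `$('[data-load-episode-content="12345"]').click();`;
const episodeId = page.match(/\$\('\[data-load-episode-content="(\d+)"\]'\)/)?.[1];
// episodeId === '12345'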

View File

@@ -1,16 +0,0 @@
interface Data {
id: string;
audio: string;
mixdropStatus: string;
fembedStatus: string;
streamtapeStatus: string;
warezcdnStatus: string;
}
type List = {
[key: string]: Data;
};
export interface SerieAjaxResponse {
list: List;
}

View File

@@ -59,19 +59,21 @@ export async function scrapeInvidualSource(
if (!output) throw new Error('output is null');
// filter output with only valid embeds that are not disabled
output.embeds = output.embeds.filter((embed) => {
const e = list.embeds.find((v) => v.id === embed.embedId);
if (!e || e.disabled) return false;
return true;
});
// filter output with only valid embeds that are not disabled, and remove duplicates
output.embeds = output.embeds
.filter((embed) => {
const e = list.embeds.find((v) => v.id === embed.embedId);
if (!e || e.disabled) return false;
return true;
})
.filter((v, i, a) => a.findIndex((t) => t.embedId === v.embedId) === i);
if ((!output.stream || output.stream.length === 0) && output.embeds.length === 0)
throw new NotFoundError('No streams found');
// only check for playable streams if there are streams, and if there are no embeds
if (output.stream && output.stream.length > 0 && output.embeds.length === 0) {
const playableStreams = await validatePlayableStreams(output.stream, ops, sourceScraper.id);
const playableStreams = await validatePlayableStreams(output.stream, ops);
if (playableStreams.length === 0) throw new NotFoundError('No playable streams found');
output.stream = playableStreams;
}
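The added findIndex filter keeps only the first embed per embedId, which is how duplicate embeds are dropped; a small illustration with made-up data:
// Sample data is hypothetical; shows the dedupe-by-embedId filter in isolation.
const found = [
  { embedId: 'filemoon', url: 'https://example.com/a' },
  { embedId: 'filemoon', url: 'https://example.com/b' },
  { embedId: 'streamtape', url: 'https://example.com/c' },
];
const deduped = found.filter((v, i, a) => a.findIndex((t) => t.embedId === v.embedId) === i);
// deduped => the first 'filemoon' entry and the 'streamtape' entry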
@@ -112,7 +114,7 @@ export async function scrapeIndividualEmbed(
.filter((stream) => flagsAllowedInFeatures(ops.features, stream.flags));
if (output.stream.length === 0) throw new NotFoundError('No streams found');
const playableStreams = await validatePlayableStreams(output.stream, ops, embedScraper.id);
const playableStreams = await validatePlayableStreams(output.stream, ops);
if (playableStreams.length === 0) throw new NotFoundError('No playable streams found');
output.stream = playableStreams;

View File

@@ -104,7 +104,7 @@ export async function runAllProviders(list: ProviderList, ops: ProviderRunnerOpt
// return stream if there are any
if (output.stream?.[0]) {
const playableStream = await validatePlayableStream(output.stream[0], ops, source.id);
const playableStream = await validatePlayableStream(output.stream[0], ops);
if (!playableStream) throw new NotFoundError('No streams found');
return {
sourceId: source.id,
@@ -112,12 +112,13 @@ export async function runAllProviders(list: ProviderList, ops: ProviderRunnerOpt
};
}
// filter disabled and run embed scrapers on listed embeds
// filter disabled, filter out duplicates, run embed scrapers on listed embeds
const sortedEmbeds = output.embeds
.filter((embed) => {
const e = list.embeds.find((v) => v.id === embed.embedId);
return e && !e.disabled;
})
.filter((v, i, a) => a.findIndex((t) => t.embedId === v.embedId) === i)
.sort((a, b) => embedIds.indexOf(a.embedId) - embedIds.indexOf(b.embedId));
if (sortedEmbeds.length > 0) {
@@ -151,7 +152,7 @@ export async function runAllProviders(list: ProviderList, ops: ProviderRunnerOpt
if (embedOutput.stream.length === 0) {
throw new NotFoundError('No streams found');
}
const playableStream = await validatePlayableStream(embedOutput.stream[0], ops, embed.embedId);
const playableStream = await validatePlayableStream(embedOutput.stream[0], ops);
if (!playableStream) throw new NotFoundError('No streams found');
embedOutput.stream = [playableStream];
} catch (error) {

Some files were not shown because too many files have changed in this diff