Refurb linting
Lint with [refurb](https://github.com/dosisod/refurb) using `--disable 126 --python-version 3.9`
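For reference, the run can presumably be reproduced along these lines; the package directory `bdfr` is an assumption here, not something stated in the commit:

```sh
pip install refurb
# disable check 126 and have refurb assume Python 3.9 when deciding which suggestions apply
refurb bdfr --disable 126 --python-version 3.9
```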
```diff
@@ -75,7 +75,7 @@ class Configuration(Namespace):
         if not yaml_file_loc.exists():
             logger.error(f'No YAML file found at {yaml_file_loc}')
             return
-        with open(yaml_file_loc) as file:
+        with yaml_file_loc.open() as file:
             try:
                 opts = yaml.load(file, Loader=yaml.FullLoader)
             except yaml.YAMLError as e:
```
```diff
@@ -91,7 +91,7 @@ class RedditConnector(metaclass=ABCMeta):
         logger.log(9, 'Created site authenticator')
 
         self.args.skip_subreddit = self.split_args_input(self.args.skip_subreddit)
-        self.args.skip_subreddit = set([sub.lower() for sub in self.args.skip_subreddit])
+        self.args.skip_subreddit = {sub.lower() for sub in self.args.skip_subreddit}
 
     def read_config(self):
         """Read any cfg values that need to be processed"""
```
```diff
@@ -113,7 +113,7 @@ class RedditConnector(metaclass=ABCMeta):
     def parse_disabled_modules(self):
         disabled_modules = self.args.disable_module
         disabled_modules = self.split_args_input(disabled_modules)
-        disabled_modules = set([name.strip().lower() for name in disabled_modules])
+        disabled_modules = {name.strip().lower() for name in disabled_modules}
         self.args.disable_module = disabled_modules
         logger.debug(f'Disabling the following modules: {", ".join(self.args.disable_module)}')
 
```
```diff
@@ -249,7 +249,7 @@ class RedditConnector(metaclass=ABCMeta):
         if self.args.authenticate:
             try:
                 subscribed_subreddits = list(self.reddit_instance.user.subreddits(limit=None))
-                subscribed_subreddits = set([s.display_name for s in subscribed_subreddits])
+                subscribed_subreddits = {s.display_name for s in subscribed_subreddits}
             except prawcore.InsufficientScope:
                 logger.error('BDFR has insufficient scope to access subreddit lists')
         else:
```
```diff
@@ -428,7 +428,7 @@ class RedditConnector(metaclass=ABCMeta):
             if not id_file.exists():
                 logger.warning(f'ID file at {id_file} does not exist')
                 continue
-            with open(id_file, 'r') as file:
+            with id_file.open('r') as file:
                 for line in file:
                     out.append(line.strip())
         return set(out)
```
```diff
@@ -36,7 +36,7 @@ class DownloadFilter:
         combined_extensions = '|'.join(self.excluded_extensions)
         pattern = re.compile(r'.*({})$'.format(combined_extensions))
         if re.match(pattern, resource_extension):
-            logger.log(9, f'Url "{resource_extension}" matched with "{str(pattern)}"')
+            logger.log(9, f'Url "{resource_extension}" matched with "{pattern}"')
             return False
         else:
             return True
```
```diff
@@ -47,7 +47,7 @@ class DownloadFilter:
         combined_domains = '|'.join(self.excluded_domains)
         pattern = re.compile(r'https?://.*({}).*'.format(combined_domains))
         if re.match(pattern, url):
-            logger.log(9, f'Url "{url}" matched with "{str(pattern)}"')
+            logger.log(9, f'Url "{url}" matched with "{pattern}"')
             return False
         else:
             return True
```
```diff
@@ -25,7 +25,7 @@ logger = logging.getLogger(__name__)
 def _calc_hash(existing_file: Path):
     chunk_size = 1024 * 1024
     md5_hash = hashlib.md5()
-    with open(existing_file, 'rb') as file:
+    with existing_file.open('rb') as file:
         chunk = file.read(chunk_size)
         while chunk:
             md5_hash.update(chunk)
```
```diff
@@ -127,7 +127,7 @@ class RedditDownloader(RedditConnector):
                              f' in submission {submission.id}')
                 return
             try:
-                with open(destination, 'wb') as file:
+                with destination.open('wb') as file:
                     file.write(res.content)
                 logger.debug(f'Written file to {destination}')
             except OSError as e:
```
```diff
@@ -107,7 +107,7 @@ class FileNameFormatter:
             destination_directory,
             *[self._format_name(resource.source_submission, part) for part in self.directory_format_string],
         )
-        index = f'_{str(index)}' if index else ''
+        index = f'_{index}' if index else ''
         if not resource.extension:
             raise BulkDownloaderException(f'Resource from {resource.url} has no extension')
         file_name = str(self._format_name(resource.source_submission, self.file_format_string))
```
```diff
@@ -48,11 +48,11 @@ class Youtube(BaseDownloader):
                     raise SiteDownloaderError(f'Youtube download failed: {e}')
 
                 downloaded_files = list(download_path.iterdir())
-                if len(downloaded_files) > 0:
+                if downloaded_files:
                     downloaded_file = downloaded_files[0]
                 else:
                     raise NotADownloadableLinkError(f"No media exists in the URL {self.post.url}")
-                with open(downloaded_file, 'rb') as file:
+                with downloaded_file.open('rb') as file:
                     content = file.read()
                 return content
         return download
```
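The same few refurb-style rewrites recur across the hunks above. A minimal, self-contained Python sketch of those idioms (the file name below is a placeholder, not from the repository):

```python
from pathlib import Path

names = ['Python ', 'python', 'PYTHON']

# set([...]) around a list comprehension -> a set comprehension
lowered = {name.strip().lower() for name in names}

# open(path) -> path.open() when the value is already a pathlib.Path
example = Path('example.txt')  # placeholder file
example.write_text('first line\nsecond line\n')
with example.open('r') as file:
    lines = [line.strip() for line in file]

# len(x) > 0 -> rely on the sequence's truthiness
if lines:
    # str(x) inside an f-string is redundant; formatting already calls str()
    print(f'{lowered} / first line: {lines[0]}')
```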