https://gcc.gnu.org/g:adaa6472e158d51fc30a74d799eb2a3e9576942c

commit adaa6472e158d51fc30a74d799eb2a3e9576942c
Author: Eric Gallager <egalla...@gcc.gnu.org>
Date:   Wed Aug 28 03:28:10 2024 -0400

    run `ruff check --fix`
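
For context, `ruff check --fix` runs ruff's linter and applies its automatically
fixable rules in place. The commit does not record the configuration used, but the
hunks below line up with fixable rules such as F401 (unused imports), E713
(`not x in y` rewritten as `x not in y`), F541 (f-strings with no placeholders),
E703 (trailing semicolons), and F632 (`is` compared against a string literal); that
mapping is an inference, not part of the commit. A minimal before/after sketch of
those patterns on a hypothetical snippet, not lines taken from this diff:

    # Before: the patterns ruff flags (hypothetical snippet)
    import sys                        # F401: imported but never used

    def lookup(name, table):
        if not name in table:         # E713: prefer "name not in table"
            print(f"missing entry");  # F541: no placeholders; E703: stray semicolon
        return table.get(name)

    # After `ruff check --fix`, the same snippet becomes:
    def lookup(name, table):
        if name not in table:
            print("missing entry")
        return table.get(name)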

Diff:
---
 contrib/analyze_brprob.py                      |  4 +---
 contrib/check_GNU_style_lib.py                 |  1 -
 contrib/gcc-changelog/git_commit.py            |  4 ++--
 contrib/header-tools/headerutils.py            | 22 ++++++++++------------
 contrib/unicode/from_glibc/utf8_gen.py         |  9 ++++-----
 contrib/unicode/gen-combining-chars.py         |  2 +-
 contrib/unicode/gen-printable-chars.py         |  2 +-
 contrib/unicode/gen_libstdcxx_unicode_data.py  |  9 ++++-----
 contrib/unused_functions.py                    |  5 +++--
 gcc/jit/docs/conf.py                           |  2 +-
 gcc/m2/tools-src/boilerplate.py                |  2 +-
 gcc/m2/tools-src/tidydates.py                  |  2 +-
 gcc/regenerate-opt-urls.py                     |  2 --
 gcc/testsuite/g++.dg/modules/test-depfile.py   |  1 -
 maintainer-scripts/bugzilla-close-candidate.py |  3 +--
 15 files changed, 30 insertions(+), 40 deletions(-)

diff --git a/contrib/analyze_brprob.py b/contrib/analyze_brprob.py
index ca42fa6a6c26..17b8865ac6a7 100755
--- a/contrib/analyze_brprob.py
+++ b/contrib/analyze_brprob.py
@@ -64,8 +64,6 @@
 #  "opcode values nonequal (on trees)" heuristics has good hirate, but poor
 #  coverage.
 
-import sys
-import os
 import re
 import argparse
 
@@ -227,7 +225,7 @@ class Profile:
         self.niter_vector = []
 
     def add(self, name, prediction, count, hits):
-        if not name in self.heuristics:
+        if name not in self.heuristics:
             self.heuristics[name] = Summary(name)
 
         s = self.heuristics[name]
diff --git a/contrib/check_GNU_style_lib.py b/contrib/check_GNU_style_lib.py
index 6dbe4b53559c..07a33ce6fca4 100755
--- a/contrib/check_GNU_style_lib.py
+++ b/contrib/check_GNU_style_lib.py
@@ -26,7 +26,6 @@
 # like this:
 # $ pip3 install unidiff termcolor
 
-import sys
 import re
 import unittest
 
diff --git a/contrib/gcc-changelog/git_commit.py b/contrib/gcc-changelog/git_commit.py
index 87ecb9e1a17d..170bcad14348 100755
--- a/contrib/gcc-changelog/git_commit.py
+++ b/contrib/gcc-changelog/git_commit.py
@@ -348,7 +348,7 @@ class GitCommit:
         if self.info.lines and not self.revert_commit:
             self.subject_prs = {m.group('pr') for m in subject_pr2_regex.finditer(info.lines[0])}
             for m in subject_pr_regex.finditer(info.lines[0]):
-                if not m.group('component') in bug_components:
+                if m.group('component') not in bug_components:
                     self.errors.append(Error('invalid PR component in subject', info.lines[0]))
                 self.subject_prs.add(m.group('pr'))
 
@@ -506,7 +506,7 @@ class GitCommit:
                     if not component:
                         self.errors.append(Error('missing PR component', line))
                         continue
-                    elif not component[:-1] in bug_components:
+                    elif component[:-1] not in bug_components:
                         self.errors.append(Error('invalid PR component', line))
                         continue
                     else:
diff --git a/contrib/header-tools/headerutils.py b/contrib/header-tools/headerutils.py
index 1fb022144146..d62a79bd2733 100755
--- a/contrib/header-tools/headerutils.py
+++ b/contrib/header-tools/headerutils.py
@@ -1,10 +1,8 @@
 #! /usr/bin/python3
 import os.path
 import sys
-import shlex
 import re
 import subprocess
-import shutil
 import pickle
 
 import multiprocessing 
@@ -31,7 +29,7 @@ def find_pound_define (line):
       print ("What? more than 1 match in #define??")
       print (inc)
       sys.exit(5)
-    return inc[0];
+    return inc[0]
   return ""
 
 def is_pound_if (line):
@@ -95,7 +93,7 @@ def process_include_info (filen, do_macros, keep_src):
   if not os.path.exists (filen):
     return empty_iinfo
 
-  sfile = open (filen, "r");
+  sfile = open (filen, "r")
   data = sfile.readlines()
   sfile.close()
 
@@ -132,7 +130,7 @@ def process_include_info (filen, do_macros, keep_src):
       d = find_pound_define (line)
       if d:
         if d not in macout:
-          macout.append (d);
+          macout.append (d)
           continue
 
       d = find_pound_if (line)
@@ -146,7 +144,7 @@ def process_include_info (filen, do_macros, keep_src):
         else:
           for mac in d:
             if mac != "defined" and mac not in macin:
-              macin.append (mac);
+              macin.append (mac)
 
   if not keep_src:
     data = list()
@@ -299,7 +297,7 @@ def find_unique_include_list (filen):
 # (filen, macin, macout, incl)
 
 def create_macro_in_out (filen):
-  sfile = open (filen, "r");
+  sfile = open (filen, "r")
   data = sfile.readlines()
   sfile.close()
 
@@ -311,14 +309,14 @@ def create_macro_in_out (filen):
     d = find_pound_define (line)
     if d != "":
       if d not in macout:
-        macout.append (d);
+        macout.append (d)
       continue
 
     d = find_pound_if (line)
     if len(d) != 0:
       for mac in d:
         if mac != "defined" and mac not in macin:
-          macin.append (mac);
+          macin.append (mac)
       continue
 
     nm = find_pound_include (line, True, True)
@@ -474,7 +472,7 @@ def get_make_rc (rc, output):
     h = re.findall ("warning: inline function.*used but never defined", output)
     if len(h) != 0:
       rc = 1
-  return rc;
+  return rc
 
 def get_make_output (build_dir, make_opt):
   devnull = open('/dev/null', 'w')
@@ -485,7 +483,7 @@ def get_make_output (build_dir, make_opt):
   else:
     command = make + make_opt
   process = subprocess.Popen(command, stdout=devnull, stderr=subprocess.PIPE, shell=True)
-  output = process.communicate();
+  output = process.communicate()
   rc = get_make_rc (process.returncode, output[1])
   return (rc , output[1])
 
@@ -509,7 +507,7 @@ def spawn_makes (command_list):
     if (ret[0] != 0):
       # Just record the first one.
       if rc[0] == 0:
-        rc = ret;
+        rc = ret
   return rc
 
 def get_make_output_parallel (targ_list, make_opt, at_a_time):
diff --git a/contrib/unicode/from_glibc/utf8_gen.py b/contrib/unicode/from_glibc/utf8_gen.py
index 5e77333bb4f9..abf55d6ca915 100755
--- a/contrib/unicode/from_glibc/utf8_gen.py
+++ b/contrib/unicode/from_glibc/utf8_gen.py
@@ -28,7 +28,6 @@ It will output UTF-8 file
 '''
 
 import argparse
-import sys
 import re
 import unicode_utils
 
@@ -147,10 +146,10 @@ def process_charmap(flines, outfile):
         #
         # 3400;<CJK Ideograph Extension A, First>;Lo;0;L;;;;;N;;;;;
         # 4DB5;<CJK Ideograph Extension A, Last>;Lo;0;L;;;;;N;;;;;
-        if fields[1].endswith(', First>') and not 'Surrogate,' in fields[1]:
+        if fields[1].endswith(', First>') and 'Surrogate,' not in fields[1]:
             fields_start = fields
             continue
-        if fields[1].endswith(', Last>') and not 'Surrogate,' in fields[1]:
+        if fields[1].endswith(', Last>') and 'Surrogate,' not in fields[1]:
             process_range(fields_start[0], fields[0],
                           outfile, fields[1][:-7]+'>')
             fields_start = []
@@ -227,7 +226,7 @@ def process_width(outfile, ulines, elines, plines):
     width_dict = {}
     for line in elines:
         fields = line.split(";")
-        if not '..' in fields[0]:
+        if '..' not in fields[0]:
             code_points = (fields[0], fields[0])
         else:
             code_points = fields[0].split("..")
@@ -244,7 +243,7 @@ def process_width(outfile, ulines, elines, plines):
         # Characters with the property “Prepended_Concatenation_Mark”
         # should have the width 1:
         fields = line.split(";")
-        if not '..' in fields[0]:
+        if '..' not in fields[0]:
             code_points = (fields[0], fields[0])
         else:
             code_points = fields[0].split("..")
diff --git a/contrib/unicode/gen-combining-chars.py b/contrib/unicode/gen-combining-chars.py
index 9f9365ca0a23..8649171538ac 100755
--- a/contrib/unicode/gen-combining-chars.py
+++ b/contrib/unicode/gen-combining-chars.py
@@ -51,7 +51,7 @@ ranges = make_ranges(is_combining_char)
 if 0:
     pprint(ranges)
 
-print(f"/* Generated by contrib/unicode/gen-combining-chars.py")
+print("/* Generated by contrib/unicode/gen-combining-chars.py")
 print(f"   using version {unicodedata.unidata_version}"
       " of the Unicode standard.  */")
 print("\nstatic const cppchar_t combining_range_ends[] = {", end="")
diff --git a/contrib/unicode/gen-printable-chars.py b/contrib/unicode/gen-printable-chars.py
index 2d4ed4eed7bc..17c7acbe7e17 100755
--- a/contrib/unicode/gen-printable-chars.py
+++ b/contrib/unicode/gen-printable-chars.py
@@ -53,7 +53,7 @@ ranges = make_ranges(is_printable_char)
 if 0:
     pprint(ranges)
 
-print(f"/* Generated by contrib/unicode/gen-printable-chars.py")
+print("/* Generated by contrib/unicode/gen-printable-chars.py")
 print(f"   using version {unicodedata.unidata_version}"
       " of the Unicode standard.  */")
 print("\nstatic const cppchar_t printable_range_ends[] = {", end="")
diff --git a/contrib/unicode/gen_libstdcxx_unicode_data.py b/contrib/unicode/gen_libstdcxx_unicode_data.py
index da2f6ee66bf4..19791284942f 100755
--- a/contrib/unicode/gen_libstdcxx_unicode_data.py
+++ b/contrib/unicode/gen_libstdcxx_unicode_data.py
@@ -26,7 +26,6 @@
 # Then run this script and save the output to
 # ../../libstdc++-v3/include/bits/unicode-data.h
 
-import sys
 import re
 import math
 import os
@@ -127,7 +126,7 @@ edges = find_edges(all_code_points, 1)
 # Table for std::__unicode::__format_width(char32_t)
 
 print("  // Table generated by contrib/unicode/gen_std_format_width.py,")
-print("  // from EastAsianWidth.txt from the Unicode standard.");
+print("  // from EastAsianWidth.txt from the Unicode standard.")
 print("  inline constexpr char32_t __width_edges[] = {", end="")
 for i, e in enumerate(edges):
     if i % 8:
@@ -168,7 +167,7 @@ print("  };\n")
 # Tables for std::__unicode::_Grapheme_cluster_state
 
 print("  // Values generated by contrib/unicode/gen_std_format_width.py,")
-print("  // from GraphemeBreakProperty.txt from the Unicode standard.");
+print("  // from GraphemeBreakProperty.txt from the Unicode standard.")
 print("  // Entries are (code_point << shift_bits) + property.")
 print("  inline constexpr int __gcb_shift_bits = {:#x};".format(shift_bits))
 print("  inline constexpr uint32_t __gcb_edges[] = {", end="")
@@ -210,7 +209,7 @@ incb_props = {None:0, "Consonant":1, "Extend":2}
 print("  enum class _InCB { _Consonant = 1, _Extend = 2 };\n")
 # Table for std::__unicode::__incb_property
 print("  // Values generated by contrib/unicode/gen_std_format_width.py,")
-print("  // from DerivedCoreProperties.txt from the Unicode standard.");
+print("  // from DerivedCoreProperties.txt from the Unicode standard.")
 print("  // Entries are (code_point << 2) + property.")
 print("  inline constexpr uint32_t __incb_edges[] = {", end="")
 for i, e in enumerate(edges):
@@ -239,7 +238,7 @@ edges = find_edges(all_code_points, False)
 
 # Table for std::__unicode::__is_extended_pictographic
 print("  // Table generated by contrib/unicode/gen_std_format_width.py,")
-print("  // from emoji-data.txt from the Unicode standard.");
+print("  // from emoji-data.txt from the Unicode standard.")
 print("  inline constexpr char32_t __xpicto_edges[] = {", end="")
 for i, e in enumerate(edges):
     if i % 8:
diff --git a/contrib/unused_functions.py b/contrib/unused_functions.py
index f24b3970468b..5d12c58dd29e 100755
--- a/contrib/unused_functions.py
+++ b/contrib/unused_functions.py
@@ -17,14 +17,15 @@
 # unused_functions.py gcc/c  gcc/c-family/ gcc/*-c.o | grep -v "'gt_"
 # unused_functions.py gcc/cp gcc/c-family/ gcc/*-c.o | grep -v "'gt_"
 
-import sys, os
+import sys
+import os
 from tempfile import mkdtemp
 from subprocess import Popen, PIPE
 
 def usage():
     sys.stderr.write("usage: %s [-v] [dirs | files] [-- <readelf options>]\n"
                         % sys.argv[0])
-    sys.stderr.write("\t-v\tVerbose output\n");
+    sys.stderr.write("\t-v\tVerbose output\n")
     sys.exit(1)
 
 (odir, sym_args, tmpd, verbose) = (set(), "", None, False)
diff --git a/gcc/jit/docs/conf.py b/gcc/jit/docs/conf.py
index c241d0cdf349..fe1e5e19bf2c 100644
--- a/gcc/jit/docs/conf.py
+++ b/gcc/jit/docs/conf.py
@@ -11,7 +11,7 @@
 # All configuration values have a default; values that are commented out
 # serve to show the default.
 
-import sys, os
+import os
 
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
diff --git a/gcc/m2/tools-src/boilerplate.py b/gcc/m2/tools-src/boilerplate.py
index 313446871ee6..845ca6b95848 100644
--- a/gcc/m2/tools-src/boilerplate.py
+++ b/gcc/m2/tools-src/boilerplate.py
@@ -446,7 +446,7 @@ def visit_dir(startDir, ext, func):
             if (len(fname) > len(ext)) and (fname[-len(ext):] == ext):
                 fullpath = os.path.join(dirName, fname)
                 output_name = fullpath
-                if not (fullpath in seen_files):
+                if fullpath not in seen_files:
                     seen_files += [fullpath]
                     func(fullpath)
             # Remove the first entry in the list of sub-directories
diff --git a/gcc/m2/tools-src/tidydates.py b/gcc/m2/tools-src/tidydates.py
index 0fa28c9f204f..c9e756d4354e 100644
--- a/gcc/m2/tools-src/tidydates.py
+++ b/gcc/m2/tools-src/tidydates.py
@@ -92,7 +92,7 @@ def handle_copyright(outfile, lines, n, leader1, leader2):
             else:
                 seen_date = False
             if seen_date:
-                if not (e in years):
+                if e not in years:
                     c += len(e) + len(punctuation)
                     outfile.write(' ')
                     outfile.write(e)
diff --git a/gcc/regenerate-opt-urls.py b/gcc/regenerate-opt-urls.py
index 666993ea0b93..c47e7b115c68 100755
--- a/gcc/regenerate-opt-urls.py
+++ b/gcc/regenerate-opt-urls.py
@@ -35,10 +35,8 @@ To run unit tests:
 """
 
 import argparse
-import json
 import os
 from pathlib import Path
-from pprint import pprint
 import sys
 import re
 import unittest
diff --git a/gcc/testsuite/g++.dg/modules/test-depfile.py b/gcc/testsuite/g++.dg/modules/test-depfile.py
index 9693c058ece6..84e0e1b6cb38 100644
--- a/gcc/testsuite/g++.dg/modules/test-depfile.py
+++ b/gcc/testsuite/g++.dg/modules/test-depfile.py
@@ -1,4 +1,3 @@
-import json
 
 
 # Parameters.
diff --git a/maintainer-scripts/bugzilla-close-candidate.py b/maintainer-scripts/bugzilla-close-candidate.py
index cdace9763e79..94c6e738f236 100755
--- a/maintainer-scripts/bugzilla-close-candidate.py
+++ b/maintainer-scripts/bugzilla-close-candidate.py
@@ -21,7 +21,6 @@ https://gcc.gnu.org/bugzilla/buglist.cgi?bug_id=87486
 """
 
 import argparse
-import json
 
 import requests
 
@@ -92,7 +91,7 @@ def search():
                 continue
 
             branches = sorted(list(get_branches_by_comments(comments)),
-                              key=lambda b: 999 if b is 'master' else int(b))
+                              key=lambda b: 999 if b == 'master' else int(b))
             if branches:
                 branches_str = ','.join(branches)
                 print('%-30s%-30s%-40s%-40s%-60s' % ('https://gcc.gnu.org/PR%d' % id, branches_str, fail, work, b['summary']), flush=True)
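
One of the fixes above is more than cosmetic: in
maintainer-scripts/bugzilla-close-candidate.py, `999 if b is 'master'` tested object
identity, which Python does not guarantee for equal string values, while
`b == 'master'` tests the value itself. A small illustration of the difference,
using made-up values rather than data from the script:

    # "is" checks identity; "==" checks value.  Equal strings built at
    # runtime are usually distinct objects, so an "is" test against a
    # literal can silently fail.
    a = "master"
    b = "".join(["mas", "ter"])   # same characters, typically a separate object
    print(a == b)                 # True
    print(a is b)                 # usually False in CPython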
