Blog

WP_Query Object
(
    [query] => Array
        (
            [news-type] => blog-en
        )

    [query_vars] => Array
        (
            [news-type] => blog-en
            [error] => 
            [m] => 
            [p] => 0
            [post_parent] => 
            [subpost] => 
            [subpost_id] => 
            [attachment] => 
            [attachment_id] => 0
            [name] => 
            [pagename] => 
            [page_id] => 0
            [second] => 
            [minute] => 
            [hour] => 
            [day] => 0
            [monthnum] => 0
            [year] => 0
            [w] => 0
            [category_name] => 
            [tag] => 
            [cat] => 
            [tag_id] => 
            [author] => 
            [author_name] => 
            [feed] => 
            [tb] => 
            [paged] => 0
            [meta_key] => 
            [meta_value] => 
            [preview] => 
            [s] => 
            [sentence] => 
            [title] => 
            [fields] => 
            [menu_order] => 
            [embed] => 
            [category__in] => Array
                (
                )

            [category__not_in] => Array
                (
                )

            [category__and] => Array
                (
                )

            [post__in] => Array
                (
                )

            [post__not_in] => Array
                (
                )

            [post_name__in] => Array
                (
                )

            [tag__in] => Array
                (
                )

            [tag__not_in] => Array
                (
                )

            [tag__and] => Array
                (
                )

            [tag_slug__in] => Array
                (
                )

            [tag_slug__and] => Array
                (
                )

            [post_parent__in] => Array
                (
                )

            [post_parent__not_in] => Array
                (
                )

            [author__in] => Array
                (
                )

            [author__not_in] => Array
                (
                )

            [meta_query] => Array
                (
                )

            [ignore_sticky_posts] => 
            [suppress_filters] => 
            [cache_results] => 1
            [update_post_term_cache] => 1
            [lazy_load_term_meta] => 1
            [update_post_meta_cache] => 1
            [post_type] => 
            [posts_per_page] => 10
            [nopaging] => 
            [comments_per_page] => 50
            [no_found_rows] => 
            [taxonomy] => news-type
            [term] => blog-en
            [order] => DESC
        )

    [tax_query] => WP_Tax_Query Object
        (
            [queries] => Array
                (
                    [0] => Array
                        (
                            [taxonomy] => news-type
                            [terms] => Array
                                (
                                    [0] => blog-en
                                )

                            [field] => slug
                            [operator] => IN
                            [include_children] => 1
                        )

                )

            [relation] => AND
            [table_aliases:protected] => Array
                (
                    [0] => wp_term_relationships
                )

            [queried_terms] => Array
                (
                    [news-type] => Array
                        (
                            [terms] => Array
                                (
                                    [0] => blog-en
                                )

                            [field] => slug
                        )

                )

            [primary_table] => wp_posts
            [primary_id_column] => ID
        )

    [meta_query] => WP_Meta_Query Object
        (
            [queries] => Array
                (
                )

            [relation] => 
            [meta_table] => 
            [meta_id_column] => 
            [primary_table] => 
            [primary_id_column] => 
            [table_aliases:protected] => Array
                (
                )

            [clauses:protected] => Array
                (
                )

            [has_or_relation:protected] => 
        )

    [date_query] => 
    [queried_object] => WP_Term Object
        (
            [term_id] => 71
            [name] => Blog
            [slug] => blog-en
            [term_group] => 0
            [term_taxonomy_id] => 71
            [taxonomy] => news-type
            [description] => 
            [parent] => 0
            [count] => 25
            [filter] => raw
        )

    [queried_object_id] => 71
    [request] => SELECT SQL_CALC_FOUND_ROWS  wp_posts.ID FROM wp_posts  LEFT JOIN wp_term_relationships ON (wp_posts.ID = wp_term_relationships.object_id) LEFT  JOIN wp_icl_translations wpml_translations
							ON wp_posts.ID = wpml_translations.element_id
								AND wpml_translations.element_type = CONCAT('post_', wp_posts.post_type)  WHERE 1=1  AND ( 
  wp_term_relationships.term_taxonomy_id IN (71)
) AND wp_posts.post_type = 'post' AND (wp_posts.post_status = 'publish' OR wp_posts.post_status = 'acf-disabled' OR wp_posts.post_status = 'tribe-ea-success' OR wp_posts.post_status = 'tribe-ea-failed' OR wp_posts.post_status = 'tribe-ea-schedule' OR wp_posts.post_status = 'tribe-ea-pending' OR wp_posts.post_status = 'tribe-ea-draft') AND ( ( ( wpml_translations.language_code = 'en' OR (
					wpml_translations.language_code = 'nl'
					AND wp_posts.post_type IN ( 'attachment' )
					AND ( ( 
			( SELECT COUNT(element_id)
			  FROM wp_icl_translations
			  WHERE trid = wpml_translations.trid
			  AND language_code = 'en'
			) = 0
			 ) OR ( 
			( SELECT COUNT(element_id)
				FROM wp_icl_translations t2
				JOIN wp_posts p ON p.id = t2.element_id
				WHERE t2.trid = wpml_translations.trid
				AND t2.language_code = 'en'
				AND (
					p.post_status = 'publish' OR 
					p.post_type='attachment' AND p.post_status = 'inherit'
				)
			) = 0 ) ) 
				) ) AND wp_posts.post_type  IN ('post','page','attachment','wp_block','acf-field-group','bwl_advanced_faq','tribe_venue','tribe_organizer','tribe_events','mc4wp-form','slider-data','actualiteiten','accordion','failissementens','advocaten','blogs','seminar','juridisch-medewerker' )  ) OR wp_posts.post_type  NOT  IN ('post','page','attachment','wp_block','acf-field-group','bwl_advanced_faq','tribe_venue','tribe_organizer','tribe_events','mc4wp-form','slider-data','actualiteiten','accordion','failissementens','advocaten','blogs','seminar','juridisch-medewerker' )  ) GROUP BY wp_posts.ID ORDER BY wp_posts.menu_order, wp_posts.post_date DESC LIMIT 0, 10
    [posts] => Array
        (
            [0] => WP_Post Object
                (
                    [ID] => 27725
                    [post_author] => 61
                    [post_date] => 2021-11-11 11:38:43
                    [post_date_gmt] => 2021-11-11 10:38:43
                    [post_content] => Software development is a complicated process with many hurdles along the way. That is why virtually every software user will have to deal with a bug or defect at some point. Usually, the flaw can be fixed with a simple update. If the software developer is no longer contractually obliged to release updates, it remains to be seen whether they will voluntarily solve the problem. Under certain – strict – conditions, end users can then take matters into their own hands and decompile the software. This blog takes a closer look at the legal aspects of decompilation, based on the recent judgment of the Court of Justice of the European Union (CJEU).

What is decompilation?

Software is initially written in source code: the programming language that can be read by humans. However, a computer cannot understand these instructions. That is why the source code must first be transcribed into a functional format that the computer can read: object code. This is done by means of a specific program called a compiler. The process of converting source code to object code is known as compilation. With decompilation, the opposite happens. By using a decompiler, the source code is reconstructed from the object code. This does not result in the original source code, but in a 'quasi source code' that is very similar to it. The end user can use this quasi source code to make a version of the software in which the errors have been corrected. However, the option to decompile software does not offer end users carte blanche: it remains a duplication of the software (in modified form) that usually requires permission from the copyright owner. The conditions under which decompiling is permitted are set out by the CJEU in Top Systems v. Belgian State, which will be discussed below.[1]

The preceding events

Top System is a software development company. It has developed several software applications for Selor, an agency that takes care of the selection and recruitment of employees for various Belgian government entities. The software applications made use of functionalities that were derived from the framework work that was developed by Top System. This framework later turned out to be the cause of several operational problems. Selor and Top System were unable to reach an agreement on how to resolve these complications. Eventually, Selor took matters into their own hands and decompiled the target code, after which Selor used the quasi source code to correct most of the errors. In doing so, Selor had to disable a number of functionalities from the other software applications. As the copyright owner, Top System believes that Selor has acted in violation of its exclusive right to reproduce the software.

CJEU: Decompilation is allowed for necessary improvements

The CJEU states that according to Directive 2009/24/EC (hereafter: “the Software Directive”), reproduction and translation of any code form (i.e. decompilation) is an exclusive right of the copyright owner.[1] The lawful purchaser may only decompile the software without prior authorization from the copyright owner, if this is necessary to run the software in accordance with its intended purpose or to correct any errors that prevent said use. Correct errors The lawful purchaser is only allowed to correct errors that impair the ability to use the software in accordance with its intended purpose. Corrections may also consist of disabling certain  functionalities, if this allows the program to be reused for its intended purpose. The CJEU also explicitly distinguishes between decompiling for the purpose of correcting errors and decompiling for the purpose of compatibility. The latter is only allowed if compatibility with another independently created program cannot be achieved in any other way. Necessary Decompilation as measure also needs to be absolutely necessary in order to be able to use the program for its intended purpose. The lawful purchaser is therefore not allowed to use the quasi source code for any other purposes. Decompilation is also unnecessary if and when the original source code is legally or contractually accessible to the lawful purchaser. On the basis of Article 5, paragraph 1 of the Software Directive, both parties can make specific arrangements regarding the way in which errors should be corrected. One could think of an arrangement whereby the software developer maintains and corrects the software for a certain period of time. However, it is not permitted to completely exclude the ability to correct errors by contract.

What does this mean in practice?

The lawful purchaser may decompile a computer program - without prior authorization from the copyright owner - if this is necessary to run the software in accordance with its intended purpose or to correct errors that affect said use. In doing so, the following should be taken into account:
  • Identify if an error is preventing you from using a software application for its intended purpose.
  • Check whether decompiling the software is possible and necessary. Take a closer look at the contractual obligations of both parties and the legal bases on which the lawful purchaser can rely: perhaps the original source code is legally or contractually accessible.
  • When correcting errors it is also allowed to disable certain functions, if those functionalities prevent you from using the software for its intended purpose.
Here you can find the dutch version. [1] CJEU 6th of October 2021, ECLI:EU:C:2021:811 (Top Systems/Belgian State). [2] Article 4 sub a and b jo. 6 paragraph 1 Software Directive. [post_title] => When are you allowed to decompile software? [post_excerpt] => [post_status] => publish [comment_status] => open [ping_status] => open [post_password] => [post_name] => when-are-you-allowed-to-decompile-software [to_ping] => [pinged] => [post_modified] => 2021-11-11 11:55:03 [post_modified_gmt] => 2021-11-11 10:55:03 [post_content_filtered] => [post_parent] => 0 [guid] => https://bg.legal/?p=27725 [menu_order] => 0 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [1] => WP_Post Object ( [ID] => 27674 [post_author] => 6 [post_date] => 2021-11-09 10:11:12 [post_date_gmt] => 2021-11-09 09:11:12 [post_content] => Nowadays it is possible to do everything in the cloud, you just need to have a computer with an internet connection. All you have to do is log in and all the heavy computing is done in the cloud. Once you enter the world of cloud services you will come across a number of different types of services with catchy names like IaaS, PaaS and SaaS. These types are like different animals and they all have their own advantages and disadvantages. In this article I will discuss the differences between on-premise, IaaS, PaaS and SaaS and then explain the legal implications. A general advantage of as a Service (aaS) products is that you don't have to buy and manage the servers yourself. This allows you to install more capacity much faster and you don't need to reserve extra physical space to put servers down. On the other hand, for all aaS services you are dependent on both the provider of the service and a stable internet connection. This may not be a problem for some businesses but can be unacceptable for others.

Software as a Service (SaaS)

The first type is the SaaS. A SaaS is like a goldfish, as long as you keep paying, the service keeps working. You only have to do the minimum amount of maintenance and you don't have to fix most problems yourself. An example of a SaaS is Google Drive. You can store your files in it, but only as Google sees fit. The advantage of a SaaS solution is that as a user you are not responsible for keeping the product running. The provider takes care of the security, the servers and they take care of the maintenance. This makes a SaaS solution the most accessible type of aaS of the three. The advantage of the SaaS can also be a weakness. As a user, you have (almost) no ability to add new features and also little control over how the service works behind the scenes. For example, as a user it is not possible to force Google to store your files within the EU.

Platform as a Service (PaaS)

A PaaS is more open than a SaaS. You can compare a PaaS to a dog, you have work to do on it. With a PaaS you get space on which you can run other programs. An example of this is a remote desktop. You can run all sorts of programs on it, but the provider determines which operating system is installed and when updates are performed. With a PaaS, you as a user have more control. You can install and develop programs yourself and add new functionality as a result. The provider still takes care of the fundamental security of the servers and the operating system and also ensures that the necessary updates are carried out. The disadvantage of the PaaS is that as a user you get more responsibility. You have to be sure that all the programs you install are secure and that you don't make mistakes when developing new programs.

Infrastructure as a Service (IaaS)

An IaaS offers the most freedom you can have without owning servers. It's like having a litter of puppies, you get to and have to raise them yourself. You can choose your own operating system, how you set everything up, what programs you use and what happens. Amazon Web Services (AWS) is an example of this. The advantage of an IaaS is that you have the benefits of controlling servers without having to buy or maintain them yourself. You get an empty shell that you can set up yourself and if you need to, you can make 100 copies of it in minutes. The disadvantage of an IaaS is that it requires more knowledge and skill. This is because you are responsible for everything except the physical server and the internet connection. So for this you need to know how to keep your operating system secure, how to make sure all the programs are set up and more.

On-premise

If you want to take care of everything yourself then you can purchase the software and run it on your own servers. This is a household with three children, but without a school or day-care. You have to do everything yourself and are responsible for everything. On the other hand, you are in control and therefore have the freedom to set everything up the way you want. This is what Google, Amazon and Microsoft do. They only use their own servers.

Comparison

The table at the top of this article provides an overview of which party is responsible for what. Basically, as a customer, you should be able to expect that the part which the service provider takes care of will work properly. If not, then your service provider should be liable for the downtime within the contractual limits. For example, AWS is supposed to be available 24 hours a day. Amazon guarantees that AWS is available 99.99% of the time. That means AWS can have 0,144 minutes of downtime every day. If the downtime is more than this, Amazon will give clients back some of their spent credits. The two big choices to make when deciding on which aaS is for you, are the amount of control you want as a user and the amount of responsibility. The more control you have (or want) over the service, the better you can control what happens. This can be important in relation to the GDPR and also if you perform services for others and have a duty of confidentiality. More control also means that you have more options and can therefore develop your own services. You will not be able to sell a SaaS you buy as a SaaS to others, but you can build your own SaaS on a PaaS. With an on-premise solution you have all the reins in your own hands. If you accept more responsibility, with an IaaS and a PaaS or on-premise hosting, you can also make more mistakes. You cannot hold the provider liable for these mistakes, because their responsibility decreases. If data is deleted by a program you installed, you will have to solve that yourself and your provider will not be able to do much about it. If you host on-premise, then you also have all the responsibility for all physical problems like internet connection and maintenance of servers. In terms of data protection, the type of aaS makes a difference when assessing whether the service provider is a processor or a data controller. 
The person who determines how personal data is processed is almost always one of the data controllers, so with SaaS it may well be that the service provider and the customer are joint data controllers. In all other solutions, the service provider has no say in how personal data is processed and so here the customer will be the data controller and the service provider a processor.

Conclusion

The different aaS forms each have their advantages and disadvantages. Also legally. How much responsibility and risk are you willing to accept? Does this fit with the corporate/professional liability insurance you have? For questions on this subject, please contact Jos van der Wijst (wijst@bg.legal). Here you can find the dutch version. Robin Verhoef and Jos van der Wijst.Jos van der Wijst   [post_title] => Choices when choosing cloud services [post_excerpt] => [post_status] => publish [comment_status] => open [ping_status] => open [post_password] => [post_name] => choices-when-choosing-cloud-services [to_ping] => [pinged] => [post_modified] => 2021-11-09 10:20:48 [post_modified_gmt] => 2021-11-09 09:20:48 [post_content_filtered] => [post_parent] => 0 [guid] => https://bg.legal/?p=27674 [menu_order] => 0 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [2] => WP_Post Object ( [ID] => 27650 [post_author] => 6 [post_date] => 2021-11-08 14:12:34 [post_date_gmt] => 2021-11-08 13:12:34 [post_content] => The European Commission has published a draft for an AI (Artificial Intelligence) Regulation, since it does not consider current laws and regulations are sufficient for regulating AI systems. The approach of the draft is to choose human-centered AI. Developers of AI systems must assess for themselves in which of the four risk groups their system falls. The higher the risk of an AI system, the higher the requirements for that AI system. It will still take years before the AI Regulation will come into effect. There is also good chance that the draft will be adjusted in the mean time. What does this draft mean for the AI systems that are currently being developed or used? Are there no rules which apply to them? For several types of AI systems, there are already laws and/or regulations that must be complied with. For example:
  • Medical Devices Regulation: for AI systems in medical devices;
  • Constitutions + Human Rights Treaties: for protection of fundamental rights such as freedom of speech, privacy and self-determination;
  • General Data Protection Regulation: when personal data is processed;
  • Product safety regulations: when an AI system causes injury;
  • Consumer protection: when information obligations arise from these regulations;
  • Codes of conduct: when rules (code of conduct) have been established in a sector for AI systems, and
  • Contracts: when parties have established rules for AI systems in an agreement.
In short, even without a specific AI regulation, there are already laws and regulations that an AI system must comply with. For AI developers, this means that they must check which laws and regulations their AI system must comply with and whether they are compliant with these rules. If they are not, a customer/client may later claim that the AI system did not meet the legal requirements. This can lead to claims for damages. As a user/client for the development of an AI system you will have to ask similar questions. This applies to supervisors of parties using an AI system.

LegalAIR

General information about legal aspects of AI can be found on our knowledge platform LegalAIR.

AI compliance assessment

For this reason, it is wise for both developers and users of AI systems to determine with an AI compliance assessment which laws and regulations apply to their AI system and whether they are compliant. If they are not met, timely measures can be taken to become compliant. For more questions about this, or for guidance on the performance of an AI compliance assessment, please contact Jos van der Wijst.   [post_title] => We already have rules for AI systems [post_excerpt] => [post_status] => publish [comment_status] => open [ping_status] => open [post_password] => [post_name] => we-already-have-rules-for-ai-systems [to_ping] => [pinged] => [post_modified] => 2021-11-08 14:12:34 [post_modified_gmt] => 2021-11-08 13:12:34 [post_content_filtered] => [post_parent] => 0 [guid] => https://bg.legal/?p=27650 [menu_order] => 0 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [3] => WP_Post Object ( [ID] => 27500 [post_author] => 61 [post_date] => 2021-10-28 15:06:10 [post_date_gmt] => 2021-10-28 13:06:10 [post_content] => In recent years, big data has radically changed the way we live our lives, do business and conduct (scientific) research. This is reflected in the significant rise in demand for large amounts of (personal) data. Strict privacy legislation has reinforced this development, due to the fact that it is widely regarded as the main obstacle for (free) data sharing. Synthetic Data aims to offer a solution to this problem by utilizing AI to generate new, irreducible datasets that replicate the statistical correlations of real-world data. But how anonymous are these data? And can the safeguards of the General Data Protection Regulation (GDPR) truly be circumvented by using this method? In this write-up, we will consider the legal aspects of synthetic data and whether or not they are truly a blessing in disguise for the future of data sharing and, more importantly, our privacy.

What is synthetic data?

Synthetic data are artificially created data that contain many of the correlations and insights of the original dataset, without directly replicating any of the individual entries. This way, data subjects should no longer be identifiable within the new dataset.           Example of a use case for synthetic data.

Anonymisation versus pseudonymisation

Advocates of synthetic data argue that the primary benefit of synthetic data can be found in recital 29 of the GDPR, which states that the principles of data protection do not apply to personal data that have been rendered into anonymous data in such a manner that the data subject is no longer identifiable. Be that as it may, it is still unclear whether synthetic data can actually be regarded as anonymized data. The decisive factor in these conflicting points of view is the likelihood of whether or not the controller or a third party will be able to identify any individual within the dataset, by using all reasonable measures. If this is feasible, the process should be qualified as pseudonymisation, to which the GDPR still applies. To assess whether synthetic data can be qualified as anonymized or pseudonymized data, we need to assess the robustness of the technique that is used. To do that, the Article 29 Data Protection Working Party (WP29) provides us with three criteria that elaborate on the aptitude of an anonymisation method (i.e. generating synthetic data).[1]
  • Singling out: the possibility to distinguish and identify certain individuals within the dataset.
  • Linkability: the ability to link two or more datapoints concerning the same data subject within one or more different datasets.
  • Inference: the possibility to deduce, with significant probability, the value of an attribute from a data subject by using the values given to other attributes within the dataset.
Whether or not synthetic datasets meet the aforementioned criteria will depend on the extent to which the synthetic datapoints will deviate from the original data. In most cases, adding ‘noise’ will not be enough: the AI-framework needs to have a system in place that actively monitors whether ample distance is kept between the newly generated datapoints and the original ones. Even then, it is highly recommended to maintain the possibility for human intervention if and when something goes awry. With all those measures in place, there is an argument to be made to classify synthetic data as anonymous data. In that case, the GDPR will not apply to the (further) processing of synthetic data.

Not completely exempt from the GDPR

However, this does not mean that the safeguards of the GDPR can be completely circumvented by simply utilizing a synthetic dataset. The anonymisation process (i.e. generating the synthetic dataset) inherently qualifies as the processing of personal data, to which the GDPR still applies. Thus, the controller still needs a lawful basis for generating synthetic data, as well as a purpose that is compatible with the initial purpose for which the data have been collected. In most cases, the latter should not pose a problem: the WP29 is of the opinion that anonymisation as an instance of further processing can be considered compatible with the original purpose of processing, on the condition that the modus operandi meets the criteria mentioned above.[2]

Things to keep in mind

Generating synthetic data has a lot of potential, as long as the process is implemented correctly. If you are considering to utilize synthetic data, please take note of the following points:
  • Evaluate whether synthetic data offers a suitable solution for the specific needs of your business or organization. The main advantage of synthetic data lies in their ability to preserve the statistical properties of the original dataset. This attribute provides a lot of utility in certain use cases, such as compute learning and (scientific) research.
  • Assess whether the synthetic datasets deviate enough from the original datasets and adjust the settings of the AI-systems accordingly. In doing so, pay attention to the criteria as laid down by the WP29.
  • Give thought to your obligations under the GDPR. List your lawful basis for processing, as well as the purpose for which this is done.
If you have any questions about the legal aspects of synthetic data and whether or not this anonymisation technique will be suitable for your organization, please do not hesitate to contact one of the experts at BG.Legal.                 [1] WP29, Opinion 05/2014 on Anonymisation Techniques (WP 216), 10th of April 2014, p. 11 and 12. [2] WP29, Opinion 05/2014 on Anonymisation Techniques (WP 216), 10th of April 2014, p. 7. [post_title] => Exploring the legal boundaries of Synthetic Data [post_excerpt] => [post_status] => publish [comment_status] => open [ping_status] => open [post_password] => [post_name] => exploring-the-legal-boundaries-of-synthetic-data [to_ping] => [pinged] => [post_modified] => 2021-10-28 15:06:10 [post_modified_gmt] => 2021-10-28 13:06:10 [post_content_filtered] => [post_parent] => 0 [guid] => https://bg.legal/?p=27500 [menu_order] => 0 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [4] => WP_Post Object ( [ID] => 26698 [post_author] => 6 [post_date] => 2021-08-17 15:01:04 [post_date_gmt] => 2021-08-17 13:01:04 [post_content] => Without us knowing it often, we use products and services on a daily basis where artificial intelligence ("AI") has been applied. Such as speech recognition in the car, chatbots on websites, diagnosis of cancer cells and automated decision-making. Because more and more parties, both commercial parties and governments, are getting more data available, it can be used to make a model in which predictions can be made. AI is used to create that model. For developers of AI applications, clients of developers of AI applications and those who use AI applications, the question then is which laws and regulations an AI application must comply with. Where does an AI application forms a risk, how big or small is that risk and how can a risk be mitigated or eliminated? 
But also questions about Intellectual Property aspects (is an IP application or the results of an AI application protected by an Intellectual Property right / trade secret), competition aspects (can you refuse to share an AI application with competitors), liability questions (who is liable for damages by / with an AI application) and 'civil law questions' (who is 'owner of the (existing / new) data, who is allowed to do what with the data, what happens to the data/algorithm after the end of the use of an AI application, can I establish a lien on an algorithm / AI application / data set). BG.legal can carry out an AI risk check ("AI Risk Assessment") and come up with an advice on how to mitigate or eliminate any risks.

What does it mean exactly?

Often there are already laws and regulations that apply to AI applications. Such as, for example, the General Data Protection Regulation, the Medical Device Regulation, the Constitution/Charter of Fundamental rights of the European Union and product liability regulations. But for many aspects, there is still no regulation. There is regulation to come, like the proposal for a European AI Regulation. See our blog about this proposal. In an AI Risk Assessment, we analyse for a specific AI application whether it complies with current laws and regulations and with the proposed EU AI Regulation. This means that we assess against the three components:
  1. legal – all applicable laws and regulations are complied with;
  2. ethical – ethical principles and values are respected;
  3. robust – the AI application is robust both from a technical (cyber security) and a social point of view.
In the concrete advice we indicate how risks can be eliminated or mitigated.

How it works

To carry out the AI Risk Assessment, we use a model in which we take the following steps:
  1. Performing a pre-test: is it necessary to perform an AI Risk Assessment? If the risks are very limited, then perhaps it is not necessary to perform an AI Risk Assessment.
  2. Performing Risk Assessment: together with the client, we determine in advance the team of the client with whom we carry out the assessment, how we will carry it out, whether external parties will be part of the team (ethicists, information security experts, etc.).
  3. After the assessment, the client receives a report in which we have outlined the risks of the AI application in question with recommendations on how risks can be mitigated.
  4. After measures have been taken in which risks have been mitigated, we can carry out the AI Risk Assessment again and issue a new report.
The report can be shared with external parties such as (potential) clients.

Why have BG.legal perform the AI Risk Assessment?

BG.legal has a team consisting of lawyers and a data scientist, which focuses on the legal aspects of data/AI. We have advised clients on these topics for several years. Our clients are companies (startups, scale-ups and SMEs), governments and knowledge institutions. Sometimes they develop AI applications and sometimes they have AI applications developed or they are a customer/user of an AI application. BG.legal has developed the knowledge platform legalAIR (www.legalair.nl).  Jos van der Wijst, head of the BG.tech team, coordinates the activities in the field of legal aspects of AI for the Dutch AI coalition. He is part of the core team Human-oriented AI of the NL AIC. BG.legal has the knowledge and experience to perform an AI Riks Assessment.

What does it cost?

The costs of performing an AI Risk Assessment depend on the nature and size of the AI application. After an initial meeting, we will make a quotation for the costs.

More information?

For more information, please contact Jos van der Wijst: M           : +31 (0)650695916 E             :  wijst@bg.legal Jos van der Wijst [post_title] => Risk check for AI applications [post_excerpt] => [post_status] => publish [comment_status] => open [ping_status] => open [post_password] => [post_name] => risk-check-for-ai-applications [to_ping] => [pinged] => [post_modified] => 2021-11-29 16:15:04 [post_modified_gmt] => 2021-11-29 15:15:04 [post_content_filtered] => [post_parent] => 0 [guid] => https://bg.legal/?p=26698 [menu_order] => 0 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [5] => WP_Post Object ( [ID] => 25550 [post_author] => 6 [post_date] => 2021-05-20 15:39:53 [post_date_gmt] => 2021-05-20 13:39:53 [post_content] => On April 21, 2021, the European Commission presented a proposal with new rules for Artificial Intelligence (AI). The main points in this proposal are explained in this first article.
Why new rules?
AI is already widely used, often without us realizing it. Most AI systems do not pose a risk to users, but that does not apply to all AI systems. Existing regulations are insufficient to guarantee the safety of users and their fundamental rights. This can jeopardize confidence in AI.
Which risk categories?
The European Commission proposes an approach based on risks, with four risk levels: unacceptable risk A very limited number of AI applications are classified as an unacceptable risk. These violate fundamental rights and are therefore prohibited. As an example, the Committee mentions the social labeling of citizens by governments and remote biometric identification in public spaces. A few exceptions have been made to the latter. high risk A slightly larger number of AI applications poses a high risk. These are described in the proposal. They pose a high risk because they have an impact on fundamental rights. The list of these AI applications can be changed over time. These high-risk AI applications must meet several mandatory conditions. These conditions include quality requirements for the dataset used, technical documentation, transparency and information provision to users, human supervision, and robustness, accuracy, and cybersecurity. National supervisors will be given rights to investigate with regard to these requirements. limited risk A larger group of AI applications pose a limited risk. Transparency will suffice here. The committee cites chatbots as an example, with users needing to know that they are communicating with a chatbot. minimal risk For all other AI applications, the existing laws and regulations are sufficient. Most current AI applications fall into this category.
How do you categorize AI products?
The committee has come up with a method for categorizing AI applications in one of the four risk levels. Its purpose is to provide security for companies and others. The risk is assessed based on the intended use. This means that the following factors are looked at: - the intended purpose - the number of people potentially affected - the dependence of the outcome - the irreversibility of the damage
What are the consequences for high-risk AI systems?
Before these high-risk AI systems can be used, their compliance with the regulations must be investigated. This investigation must show that the AI system is compliant with the requirements regarding data quality, documentation and traceability, transparency, human supervision, accuracy and robustness. In case of some AI systems, a "notified body" will have to be involved. A risk management system for these AI systems must also be set up by the supplier.
Who will enforce these rules?
Member States will have to designate an authority to monitor compliance.
Codes of Conduct
Suppliers of high-risk AI systems can create a voluntary code of conduct for the safe application of AI systems. The Commission is encouraging the industry to come up with these codes.
Who is liable when importing AI systems?
The importer of AI systems into the EU is responsible for the imported AI system. It must ensure that the producer is compliant with EU regulations and has a CE mark.
What is the sanction?
Violation of these regulations can be sanctioned with a fine of up to 6% of the annual turnover in the previous calendar year. This was a first analysis of the Commission proposal. A more detailed analysis will follow later. An analysis of the proposed new machinery regulation will also follow later. For more information about legal/ethical aspects of AI we are developing LegalAIR. This platform will provide practical information and tools on how to deal with AI and AI systems. For more questions, please contact Jos van der Wijst (wijst@bg.legal). Jos van der Wijst [post_title] => New European rules for Artificial Intelligence [post_excerpt] => [post_status] => publish [comment_status] => open [ping_status] => open [post_password] => [post_name] => new-european-rules-for-artificial-intelligence [to_ping] => [pinged] => [post_modified] => 2021-05-20 15:44:50 [post_modified_gmt] => 2021-05-20 13:44:50 [post_content_filtered] => [post_parent] => 0 [guid] => https://bg.legal/?p=25550 [menu_order] => 0 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [6] => WP_Post Object ( [ID] => 20406 [post_author] => 19 [post_date] => 2020-04-30 14:29:36 [post_date_gmt] => 2020-04-30 12:29:36 [post_content] => In veel bezwaar- en beroepsprocedures stellen belanghebbenden aantasting van privacy aan de kaak. In de meeste gevallen zonder succes. Er wordt in deze bezwaar- en beroepsprocedures een strak onderscheid gemaakt tussen bestuursrecht en privaatrecht. Dat wil zeggen dat privaatrechtelijke aspecten – zoals privacy – niet van belang zijn bij vergunningverlening. Dat is slechts anders in het geval van een zogenoemde ‘evidente privaatrechtelijke belemmering’. Dat er niet snel sprake is van zo’n evidente privaatrechtelijke belemmering, beschreef ik in een eerder artikel hierover. Uit dit artikel blijkt dat er twee eisen zijn:
  1. Is er sprake van een privaatrechtelijke belemmering?
  2. Is die beperking evident zodat die in de weg staan aan vergunningverlening?
Dat zijn strenge eisen. Deze eisen gelden ook bij een aantasting van privacy. Desondanks is het mogelijk om vergunningverlening tegen te houden met een beroep op aantasting van privacy. Dit blijkt uit een recente uitspraak van de hoogste bestuursrechter van Nederland.
Wat speelde er in die zaak?
Er was door de gemeente Haarlem een vergunning verleend voor het realiseren van een dakterras op een aanbouw. De buren van de vergunninghouder zijn het niet eens met de verleende vergunning. De buren vrezen een aantasting van hun privacy door inkijk in de woonkamer vanaf het dakterras. Zij stellen dat er vanaf het dakterras rechtstreeks zicht is in hun woonkamer. De buren verwijzen naar artikel 5:50 BW. Daarin is bepaald dat het niet geoorloofd is binnen twee meter van de grenslijn van een erf vensters of andere muuropeningen, dan welk balkons of soortelijke werken te hebben voor zover deze op dit erf uitzicht geven. De gemeente stelt dat er van een evidente privaatrechtelijke belemmering geen sprake is, terwijl de buren stellen dat daarvan wel sprake is. Met het uitzicht wordt inbreuk gemaakt op hun privacy en tevens wordt gehandeld in strijd met het verbod van artikel 5:50 BW.
Wat zegt de hoogste bestuursrechter?
De hoogste bestuursrechter verwijst eerst naar de vaste rechtspraak waaruit blijkt dat van een evidente privaatrechtelijke belemmering niet snel sprake is. Vervolgens overweegt de bestuursrechter: “Op basis van de door partijen overgelegde en ter zitting getoonde foto’s stelt de Afdeling vast dat er vanaf de korte zijde van het door vergunninghouder gewenste balkon rechtstreeks zicht is op het naburige erf van appellanten. Omdat de korte zijde van het balkon rechtstreeks uitkijkt op dit erf, volgt de Afdeling het college niet in de stelling dat er om een hoekje moet worden gekeken en een kwartslag moet worden gedraaid om zicht te hebben op dit erf. De korte zijde van het voorziene balkon bevindt zich binnen twee meter van de grens met het erf van appellanten en heeft een open hekwerk met een hoogte van 1,20 m. Daarmee is er sprake van zicht op het erf binnen 2 m van de grenslijn als bedoeld in artikel 5:50 van het BW. Vanaf het balkon is er door de twee glazen lichtkoepels in het dak van de aanbouw van appellanten in ieder geval ’s avonds met kunstmatige verlichting aan rechtstreeks zicht in hun woonkamer. Daargelaten of het zicht vanwege de bolling van de lichtkoepels overdag beperkt is, levert ook zicht dat wegens verlichting in de woonkamer beperkt is tot de avonduren, rechtstreeks zicht op als bedoeld in artikel 5:50 van het BW. Aangezien in het bouwplan geen voorzieningen zijn opgenomen om het rechtstreekse zicht weg te nemen, levert het bouwplan strijd op met artikel 5:50 van het BW. Dit betekent dat sprake is van een evidente privaatrechtelijke belemmering die aan vergunningverlening voor het bouwplan in de weg staat. De rechtbank heeft dit niet onderkend.”
Conclusie en relevantie voor de praktijk
Er is dus sprake van een privaatrechtelijke belemmering. Maar dat niet alleen, de belemmering staat ook in de weg aan vergunningverlening. De belemmering is ‘evident’. De gemeente had de vergunning dus niet mogen verlenen. De reden is dat er niet voldaan is aan de eis van artikel 5:50 BW. Uit deze uitspraak blijkt dat privaatrechtelijke belemmeringen dus zeker van belang kunnen zijn bij vergunningverlening. Dat geldt in het bijzonder voor dit soort ‘harde normen’ die privacy beschermen. Dit soort beperkingen in het burenrecht zal een gemeente dus in de besluitvorming – zeker in bezwaar – moeten meenemen. In een eerdere blog ben ik hier wat verder op ingegaan en heb ik meerdere voorbeelden hiervan genoemd. BG.legal zal deze ontwikkelingen nauwgezet volgen. Heeft u een vraag over privacy of een evidente privaatrechtelijke belemmering? Neem dan gerust vrijblijvend contact met mij op. Rutger Boogers, Advocaat, specialist omgevingsrecht Rutger Boogers         [post_title] => Aantasting van privacy reden om vergunning te weigeren? Jazeker! [post_excerpt] => [post_status] => publish [comment_status] => open [ping_status] => open [post_password] => [post_name] => aantasting-van-privacy-reden-om-vergunning-te-weigeren-jazeker [to_ping] => [pinged] => [post_modified] => 2020-04-30 14:29:36 [post_modified_gmt] => 2020-04-30 12:29:36 [post_content_filtered] => [post_parent] => 0 [guid] => https://bg.legal/?p=20406 [menu_order] => 0 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [7] => WP_Post Object ( [ID] => 19971 [post_author] => 26 [post_date] => 2020-03-30 13:50:57 [post_date_gmt] => 2020-03-30 11:50:57 [post_content] => Data breaches are any occurrences related with the security of personal data, including the loss, alteration or unauthorized disclosure. The definition under the GDPR includes both deliberate acts, such as hacker attacks; or accidents, such as the loss of a pen drive by an organization employee. 
Because of the GDPR obligation to notify the authorities, we are aware that data breaches recently happened in big companies like Uber and Facebook, which have huge potential and technical skills to invest in security measures. Therefore, it seems that it does not matter the sector, if the company is big or small, the main question is not if a data breach will happen, but when it will happen. Contact us (e-mail) in order to receive our paper with advice regarding action plans to prevent and deal with data breaches.
Introduction: Definition and Obligations under the GDPR
Data breaches are any occurrences related with the security of personal data, including the loss, alteration or unauthorized disclosure. The definition under the GDPR includes both deliberate acts, such as hacker attacks; or accidents, such as the loss of a pen drive by an organization employee. In accordance with Articles 33 and 34 of the GDPR, the responsible for determining the purposes and means of personal data processing (“Controller”) has the obligation to communicate the data breaches to the competent data protection authority (“DPA” or “Authority”) within 72 hours after becoming aware of it and the individuals affected in reasonable time. However, not all of them shall be notified. It shall be analysed if the data breach is likely to result in a risk to the rights and freedoms of any individuals in order to meet the requirement to notify the DPA. On the other hand, the data breach shall be likely to result not only in a risk, but a high risk for the rights and freedoms, in order to require communication to the data subjects. Because of the obligation to notify the Authorities, we are aware that data breaches recently happened in big companies like Uber and Facebook, which have huge potential and technical skills to invest in security measures. Therefore, it seems that it does not matter the sector, if the company is big or small, the main question is not if a data breach will happen, but when it will happen. In this regard, the first important point to mention is the fact that the notification of a data breach does not automatically represent that the company will receive a fine. The European DPAs haven been considered if the security measures in place before the data breach were reasonable in order to prevent data breaches and, when positive, the investigations were concluded without further consequences for the Controllers, except to review and reinforce its security policies. 
Another option, if the company is in doubt about the risks posed by the data breach, is to notify the DPA in order to receive guidance in relation to the incident. There is also no penalty for the notification of incidents that end up being considered data breaches unlikely to result in risk and, therefore, would not be required to be notified.
Prevention
Besides to the adoption of organisational and technical measures to avoid intrusions in the systems, as well to ensure security in the processes, it is relevant to have a well-structured incident response plans to deal with data breaches when they occur. It is common that the first to identify irregularities are the employees without decision-making power in the organization. Normally such employees do not have an overview of the business and may not understand what risks are related with the incident leading to their decision not to communicate the incident for the superiors. Some employees will also not communicate the incident thinking that they may be considered responsible for the problem. Even when they decide to communicate it, without a defined response plan, the communication flow is normally disorganized and inefficient. In view of the above, one of the basic requirements of an effective response plan is the obligation of communication of operational irregularities related to data protection by employees, as well as the provision of disciplinary measures for omitting any information related to an incident. In addition, it will be necessary to establish a well-structured communication flow leading the information to someone with decision-making power inside of the organization, which may, finally, bring the matter to a pre-selected interdisciplinary committee for data breach situations. Since service providers considered data processors (“Processor”) have the responsibility under the GDPR to inform any data breach to the Controller, the incident response plans shall also include them. It is recommended to include who in the organization shall be informed by the Processor and in which manner.
Communication to Authorities and Data Subjects
GDPR establishes the tasks and powers of the National DPAs, which includes the promotion of public awareness regarding data protection, awareness of Controllers and Processors in relation to the compliance with the GDPR, giving advice on processing operations, among others. In this sense, it is important to highlight that the Controller may benefit from the notification obligation to the national DPA in order to have guidance in how to remediate the risks of a data breach, but also and preferable, before any data breach occurs. The GDPR sets forth the following minimum content to be included in the notification: (i) description of the data breach, including the categories and number of individuals concerned; (ii) contact details of the Data Protection Office or other point of contact inside of the organization; (iii) description of potential consequences of the data breach; and (iv) description of the measures taken or proposed in order to mitigate the possible risks. Despite the minimum content, even if the organization still do not have all the respective information available, the guidelines of the national DPAs indicate to notify the Authority within 72 hours after becoming aware of the incident in order to comply with the timely notification requirement. The lack of the minimum content shall not hinder the timely data breach notification by the Controller. Even after the first notification, it will be possible to complement, amend and even correct the previous information provided together with the reasons of delay to provide such information. This is the recommendation aiming to have Controllers and competent DPAs working together against the risks of the data breach since the first stages. The data breach communication to the data subjects should contain at least the same elements of the notification to the Authority, except for information of the categories and number of individuals concerned which are not required. 
The main difference between the data breach notification to the authority and the data breach communication to the data subject is that, in the last one, it shall be written in clear and plain language. In view of this specificity, it is recommendable to involve an interdisciplinary team with representants of different areas of the organization, such as the legal team, which will advise in relation to the minimum requirements of the GDPR for this communication; the Information Technology team, which analyses the technical details of the incident; and the communication and marketing team, which have the ability to choose the best strategy to communicate the data breach to the affected persons and write it in a easily understandable manner. In this regard, the involvement of an interdisciplinary team is recommended also in the occasion of the notification to the authority, but it is seems to be even more important in the elaboration of the communication to the data subjects. In case the individual communication to each affected data subject involve disproportionate effort to the Controller, the GDPR provides that it is possible to make public announcements considered equally effective. To achieve this purpose, the interdisciplinary team mentioned above will need to study the best strategy to deliver the communication ensuring its effectiveness.
Mitigation Measures
The GDPR also exempts the Controller from the necessity to communicate the data breach to data subjects when the Controller has taken measures which neutralize the risks of the data breach. In order to prevent the risks of data breaches, it is recommendable to apply encryption or other techniques avoiding the access of personal data by non-authorized individuals. After the data breach, other measures are considered by the DPAs. Besides a correct data breach notification to the competent Authority and communication to the data subjects, the following measures are considered by DPAs as best practices to deal with data breaches:
  • Avoiding negotiation with criminal hackers involved in the data breach;
  • application of disciplinary measures to employees involved in the data breach in order to avoid recurrence or spreading of personal data in the possession of these employees;
  • opening of disciplinary and judicial proceedings for the same purpose mentioned in item “ii” and for repairing damages;
  • hiring of forensic services when in doubt of a data breach related with the processing activities by a Processor;
  • full internet research with cybersecurity specialists in order to analyze if the personal data was affected, including in the deep web;
  • mandatory change of relevant passwords;
  • collection of correspondences or request erasing of online messages sent to the wrong address;
  • in case of lack of internal expertise related with hacker attacks, seek external advisory;
  • review internal processes in general and raise employee awareness, especially in relation to that type of data breach.
Conclusion
In view of the above, it is important to highlight that the occurrence of a data breach does not necessarily means the violation of the GDPR and application of penalties by the competent DPA. If the organization have appropriate organisational and technical measures in place, including an incident response plan, as well as adopt measures in order to mitigate the data breach after it happens, the administrative procedures before the DPA may be concluded without further consequences to the organization. In addition, the organization may benefit from guidance of the DPA and from the data breach experience in order to avoid future data breaches of the same nature.       [post_title] => Data breaches under the GDPR [post_excerpt] => [post_status] => publish [comment_status] => open [ping_status] => open [post_password] => [post_name] => data-breaches-under-the-gdpr [to_ping] => [pinged] => [post_modified] => 2020-03-30 13:53:05 [post_modified_gmt] => 2020-03-30 11:53:05 [post_content_filtered] => [post_parent] => 0 [guid] => https://bg.legal/?p=19971 [menu_order] => 0 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [8] => WP_Post Object ( [ID] => 19751 [post_author] => 6 [post_date] => 2020-03-12 11:12:25 [post_date_gmt] => 2020-03-12 10:12:25 [post_content] => The U.S. Food and Drug Administration (FDA) has recently authorized marketing of a software based on Artificial Intelligence (AI) intended to guide medical professionals in capturing quality cardiac ultrasounds used to identify and treat heart diseases. The Software Caption Guidance is based on a machine learning technology that differentiates quality images and non-acceptable images. In addition, it is connected with an AI based interface designed to give described commands to untrained professional about the operation of the ultrasound probe in order to capture relevant images. 
Considering that heart diseases are one of the most known causes of death in the world and this technology promotes access to effective cardiac diagnostics by professionals without prior experience with ultrasound technologies, it is a potential lifesaving tool. Several AI based medical devices has been analysed and approved by the FDA since 2018. New instruments were included in the premarket submission in order to analyse the transparency and accuracy of the respective AI algorithms. This was discussed in the FDA’s paper launched in April, 2019 ““Proposed Regulatory Framework for Modifications to Artificial Intelligence/Machine Learning (AI/ML)-Based Software as a Medical Device (SaMD) - Discussion Paper and Request for Feedback”. This movement has encouraged even more investments on the sector and has been influencing the European scenario. The European Commission is working on the development of AI regulation in multi-dimensional perspectives and it seems to have concluded why and how regulate it through the publication of “Ethics Guidelines for Trustworthy AI” in April, 2019 by the European Commission’s High-level Expert Group on Artificial Intelligence. The recommendations are related with the principles of ethics, lawfulness and robustness from a technical and societal perspectives. Specifically in relation to the health sector, the Regulation EU 2017/745 on Medical Devices (Medical Devices Regulation), which will be fully applicable in May 2020, provides that software programs created with the clear intention to be used for medical purposes are considered medical devices. Therefore, AI based health technologies helping to decide on treatment of diseases through prediction or prognosis usually fall under this definition. 
In this regard, while different sectors are pressing for a practical regulation for AI, the European health sector has been mentioned as one possible case in which pre existing regulation, such as the Medical Device Regulation and its certification process, may be enough to keep up with AI based technologies. Despite the fact that medical devices are regulated by national authorities, the European Medicine Agency (EMA) is the responsible for assessment, authorization and supervision of certain categories in accordance with the European legislation. Considering the potential of innovative technologies to transform healthcare, including AI based medical devices, as well as the risks it raises, EMA has joined an European task force involving the matter[1] and has launched its main strategic goals[2], including the exploitation of digital technologies and artificial intelligence in decision making. Besides to develop expertise to engage with digital technology, artificial intelligence and cognitive computing, EMA’s idea is to create an AI test laboratory to explore application of AI based technologies which support data driven decisions. In general, the main European concerns about AI are related with transparency and accountability considering the complexity of the respective algorithms, but specially, the identification of unlawful biases and prejudicial elements. In this regard, health data breaches and AI decision making based on sensitive data such as health data may lead to discrimination and is considered of a huge risk. In addition, the Medical Devices Regulation mentions requirement such as informed consent, transparency, access to information and provision of accessible and essential information about the device to the patient.  Therefore, its recommendable at least to demonstrate the efforts to overcome the challenges related with AI mentioned above in the submissions for approvals of medical devices to EMA. 
It can be tackled by presenting predictable and verifiable algorithms, a clear understanding of the categories of data used in the project and the implementation of regular audits and procedures to avoid discrimination, errors and inaccuracies. In view of the above, EMA seems still be searching an adequate approach to ensure that AI based innovative technologies are effective and appropriate to support medical decisions, as well as to fit AI in the existing regulatory framework in a manner that these technologies are supported by the society. Notwithstanding, EMA has been supporting initiatives to explore AI and already approved investigations researches based on artificial intelligence, such as the pediatric investigation plan for PXT3003 by Pharnext company[3], which demonstrates it is open to discuss AI based projects. As included in a recent article written by Daniel Walch, director of groupement hospitalier de l’Ouest lémanique (GHOL) and by Xavier Comtesse, head of the first Think Tank in Switzerland and PHD in Computer Science: “Artificial intelligence will not replace doctors. But the doctors who will use AI will replace those who will not do it”. Therefore, it will be key to have a more practical approach in relation to the approval of AI medical devices for the promotion of innovation and trust in the European health sector, specially upon May, 2020, with the full applicability of the Medical Devices Regulation. 
[1] HMA-EMA Joint Big Data Taskforce https://www.ema.europa.eu/en/documents/minutes/hma/ema-joint-task-force-big-data-summary-report_en.pdf [2] EMA Regulatory Science to 2025 https://www.ema.europa.eu/en/documents/regulatory-procedural-guideline/ema-regulatory-science-2025-strategic-reflection_en.pdf [3] European Medicines Agency Agrees with Pharnext’s Pediatric Investigation Plan for PXT3003 https://www.pharnext.com/images/PDF/press_releases/2018.07.10_PIP_agreement_EN.pdf BG.tech [post_title] => European Perspectives on AI Medical Devices [post_excerpt] => [post_status] => publish [comment_status] => open [ping_status] => open [post_password] => [post_name] => europese-perspectieven-op-ai-medische-hulpmiddelen [to_ping] => [pinged] => [post_modified] => 2020-03-12 11:40:15 [post_modified_gmt] => 2020-03-12 10:40:15 [post_content_filtered] => [post_parent] => 0 [guid] => https://bg.legal/?p=19751 [menu_order] => 0 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [9] => WP_Post Object ( [ID] => 18547 [post_author] => 6 [post_date] => 2019-10-21 14:31:31 [post_date_gmt] => 2019-10-21 12:31:31 [post_content] => At 20 October 2019, Jos van der Wijst gave a presentation about legal aspects of blockchain cases in food. Jos is technology lawyer with a focus on the Food sector. The presentation was part of the program of the Den Bosch Data Week and was held in the Jheronimus Academy of Data Science. The next presentation of Jos will at the Agri Food Tech conference, 11 December 2019 in Den Bosch. Jos will give a presentation about collaboration in tech innovations in Food. The title of the presentation is: “Don’t let legal be a troublemaker in a collaboration”. The slides of the presentation are available here. 
Blockchain and Food [post_title] => Den Bosch Data Week: Practical experiences Blockchain and Food [post_excerpt] => [post_status] => publish [comment_status] => open [ping_status] => open [post_password] => [post_name] => den-bosch-data-week-practical-experiences-blockchain-and-food [to_ping] => [pinged] => [post_modified] => 2019-10-30 16:40:07 [post_modified_gmt] => 2019-10-30 15:40:07 [post_content_filtered] => [post_parent] => 0 [guid] => https://bg.legal/?p=18547 [menu_order] => 0 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) ) [post_count] => 10 [current_post] => -1 [in_the_loop] => [post] => WP_Post Object ( [ID] => 27725 [post_author] => 61 [post_date] => 2021-11-11 11:38:43 [post_date_gmt] => 2021-11-11 10:38:43 [post_content] => Software development is a complicated process with many hurdles along the way. That is why virtually every software user will have to deal with a bug or defect at some point. Usually, the flaw can be fixed with a simple update. If the software developer is no longer contractually obliged to release updates, it remains to be seen whether they will voluntarily solve the problem. Under certain – strict – conditions, end users can then take matters into their own hands and decompile the software. This blog takes a closer look at the legal aspects of decompilation, based on the recent judgment of the Court of Justice of the European Union (CJEU).

What is decompilation?

Software is initially written in source code: the programming language that can be read by humans. However, a computer cannot understand these instructions. That is why the source code must first be transcribed into a functional format that the computer can read: object code. This is done by means of a specific program called a compiler. The process of converting source code to object code is known as compilation. With decompilation, the opposite happens. By using a decompiler, the source code is reconstructed from the object code. This does not result in the original source code, but in a 'quasi source code' that is very similar to it. The end user can use this quasi source code to make a version of the software in which the errors have been corrected. However, the option to decompile software does not offer end users carte blanche: it remains a duplication of the software (in modified form) that usually requires permission from the copyright owner. The conditions under which decompiling is permitted are set out by the CJEU in Top Systems v. Belgian State, which will be discussed below.[1]

The preceding events

Top System is a software development company. It has developed several software applications for Selor, an agency that takes care of the selection and recruitment of employees for various Belgian government entities. The software applications made use of functionalities that were derived from the framework that was developed by Top System. This framework later turned out to be the cause of several operational problems. Selor and Top System were unable to reach an agreement on how to resolve these complications. Eventually, Selor took matters into their own hands and decompiled the object code, after which Selor used the quasi source code to correct most of the errors. In doing so, Selor had to disable a number of functionalities from the other software applications. As the copyright owner, Top System believes that Selor has acted in violation of its exclusive right to reproduce the software.

CJEU: Decompilation is allowed for necessary improvements

The CJEU states that according to Directive 2009/24/EC (hereafter: “the Software Directive”), reproduction and translation of any code form (i.e. decompilation) is an exclusive right of the copyright owner.[2] The lawful purchaser may only decompile the software without prior authorization from the copyright owner, if this is necessary to run the software in accordance with its intended purpose or to correct any errors that prevent said use. Correct errors: The lawful purchaser is only allowed to correct errors that impair the ability to use the software in accordance with its intended purpose. Corrections may also consist of disabling certain functionalities, if this allows the program to be reused for its intended purpose. The CJEU also explicitly distinguishes between decompiling for the purpose of correcting errors and decompiling for the purpose of compatibility. The latter is only allowed if compatibility with another independently created program cannot be achieved in any other way. Necessary: Decompilation as a measure also needs to be absolutely necessary in order to be able to use the program for its intended purpose. The lawful purchaser is therefore not allowed to use the quasi source code for any other purposes. Decompilation is also unnecessary if and when the original source code is legally or contractually accessible to the lawful purchaser. On the basis of Article 5, paragraph 1 of the Software Directive, both parties can make specific arrangements regarding the way in which errors should be corrected. One could think of an arrangement whereby the software developer maintains and corrects the software for a certain period of time. However, it is not permitted to completely exclude the ability to correct errors by contract.

What does this mean in practice?

The lawful purchaser may decompile a computer program - without prior authorization from the copyright owner - if this is necessary to run the software in accordance with its intended purpose or to correct errors that affect said use. In doing so, the following should be taken into account:
  • Identify if an error is preventing you from using a software application for its intended purpose.
  • Check whether decompiling the software is possible and necessary. Take a closer look at the contractual obligations of both parties and the legal bases on which the lawful purchaser can rely: perhaps the original source code is legally or contractually accessible.
  • When correcting errors it is also allowed to disable certain functions, if those functionalities prevent you from using the software for its intended purpose.
Here you can find the dutch version. [1] CJEU 6th of October 2021, ECLI:EU:C:2021:811 (Top Systems/Belgian State). [2] Article 4 sub a and b jo. 6 paragraph 1 Software Directive. [post_title] => When are you allowed to decompile software? [post_excerpt] => [post_status] => publish [comment_status] => open [ping_status] => open [post_password] => [post_name] => when-are-you-allowed-to-decompile-software [to_ping] => [pinged] => [post_modified] => 2021-11-11 11:55:03 [post_modified_gmt] => 2021-11-11 10:55:03 [post_content_filtered] => [post_parent] => 0 [guid] => https://bg.legal/?p=27725 [menu_order] => 0 [post_type] => post [post_mime_type] => [comment_count] => 0 [filter] => raw ) [comment_count] => 0 [current_comment] => -1 [found_posts] => 25 [max_num_pages] => 3 [max_num_comment_pages] => 0 [is_single] => [is_preview] => [is_page] => [is_archive] => 1 [is_date] => [is_year] => [is_month] => [is_day] => [is_time] => [is_author] => [is_category] => [is_tag] => [is_tax] => 1 [is_search] => [is_feed] => [is_comment_feed] => [is_trackback] => [is_home] => [is_privacy_policy] => [is_404] => [is_embed] => [is_paged] => [is_admin] => [is_attachment] => [is_singular] => [is_robots] => [is_favicon] => [is_posts_page] => [is_post_type_archive] => [query_vars_hash:WP_Query:private] => 67d48224e84c62fb276234f54eeafc96 [query_vars_changed:WP_Query:private] => 1 [thumbnails_cached] => [stopwords:WP_Query:private] => [compat_fields:WP_Query:private] => Array ( [0] => query_vars_hash [1] => query_vars_changed ) [compat_methods:WP_Query:private] => Array ( [0] => init_query_flags [1] => parse_tax_query ) [tribe_is_event] => [tribe_is_multi_posttype] => [tribe_is_event_category] => [tribe_is_event_venue] => [tribe_is_event_organizer] => [tribe_is_event_query] => [tribe_is_past] => )
Software development is a complicated process with many hurdles along the way. That is why virtually every software user will have to deal with a bug or defect at some...
Lees meer
Nowadays it is possible to do everything in the cloud, you just need to have a computer with an internet connection. All you have to do is log in and...
Lees meer
The European Commission has published a draft for an AI (Artificial Intelligence) Regulation, since it does not consider current laws and regulations sufficient for regulating AI systems. The approach...
Lees meer
In recent years, big data has radically changed the way we live our lives, do business and conduct (scientific) research. This is reflected in the significant rise in demand for...
Lees meer
Often without knowing it, we use products and services on a daily basis where artificial intelligence ("AI") has been applied. Such as speech recognition in the car, chatbots on...
Lees meer
On April 21, 2021, the European Commission presented a proposal with new rules for Artificial Intelligence (AI). The main points in this proposal are explained in this first article. Why...
Lees meer
In veel bezwaar- en beroepsprocedures stellen belanghebbenden aantasting van privacy aan de kaak. In de meeste gevallen zonder succes. Er wordt in deze bezwaar- en beroepsprocedures een strak onderscheid gemaakt...
Lees meer
30 Mar 2020
BG.legal
Data breaches are any occurrences related to the security of personal data, including the loss, alteration or unauthorized disclosure. The definition under the GDPR includes both deliberate acts, such as...
Lees meer
The U.S. Food and Drug Administration (FDA) has recently authorized marketing of a software based on Artificial Intelligence (AI) intended to guide medical professionals in capturing quality cardiac ultrasounds used...
Lees meer
On 20 October 2019, Jos van der Wijst gave a presentation about legal aspects of blockchain cases in food. Jos is a technology lawyer with a focus on the Food sector....
Lees meer